serial: "fix" buffer truncation bug
DMA just uses one buffer rather than two now lmao
parent d1805c2f00
commit 60ec99692c
11 changed files with 75 additions and 90 deletions
@@ -7,14 +7,12 @@ static ATOM(atomic_context);
 void atomic_enter(void)
 {
-	if (atom_get(&atomic_context) == 1)
-		__asm__ volatile("cpsid i");
+	atom_get(&atomic_context);
 }
 
 void atomic_leave(void)
 {
-	if (atom_put(&atomic_context) == 0)
-		__asm__ volatile("cpsie i");
+	atom_put(&atomic_context);
 }
 
 int is_atomic(void)
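For context, the atom behaves like a nesting counter: every atomic_enter() bumps it and every atomic_leave() drops it, so sections may nest and is_atomic() reports whether any of them is still open. A minimal usage sketch from a hypothetical caller (assuming atom_get()/atom_put() return the updated count, as the old conditionals imply):

    atomic_enter();          /* depth 1 */
    atomic_enter();          /* depth 2, nested section */
    /* ... touch data shared with irq handlers ... */
    atomic_leave();          /* back to depth 1 */
    if (is_atomic()) {
            /* still inside the outer section */
    }
    atomic_leave();          /* depth 0 */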
@@ -4,10 +4,15 @@
 #include <toolchain.h>
 
 /** @brief Make the next `STREX` fail (invoke before leaving an irq). */
-__always_inline void __clrex(void)
+__always_inline void __irq_enter(void)
 {
-	__asm__ volatile( "\tclrex\n" ::: );
+	__asm__ volatile("cpsid i");
 }
 
+__always_inline void __irq_leave(void)
+{
+	__asm__ volatile("clrex");
+	__asm__ volatile("cpsie i");
+}
 
 /** Reset exception handler */
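The intended pattern (as irq_uart() further down does) is for every interrupt handler to bracket its body with these two helpers: __irq_enter() masks interrupts on entry, and __irq_leave() clears the local exclusive monitor so that any STREX that was interrupted mid-way fails and retries, then unmasks interrupts again. A hedged sketch of a handler using them (hypothetical handler name):

    void irq_example(void)
    {
            __irq_enter();
    
            /* ... read status registers, push data into ring buffers ... */
    
            __irq_leave();
    }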
@@ -9,12 +9,11 @@
 /** Architecture-specific extension of `struct serial_device` */
 struct arch_serial_device {
-	/** should always match REG_UART_PDC_TPR */
-	struct dmabuf *tx_current;
-	/** should always match REG_UART_PDC_TNPR */
-	struct dmabuf *tx_next;
 
 	struct serial_device device;
 
+	/* TODO: Use two buffers per direction as supported by hardware */
+	struct dmabuf *txbuf;
+	/* TODO: Use DMA for TX as well */
 };
 
 /**
@@ -17,13 +17,12 @@
 #include <string.h>
 
 struct arch_serial_device arch_serial_default_device = {
-	.tx_current = NULL,
-	.tx_next = NULL,
 	.device = {
 		.rx = NULL,
 		.id = 0,
 		.baud = 0,
 	},
+	.txbuf = NULL,
 };
 struct serial_device *serial_default_device = &arch_serial_default_device.device;
 
@@ -48,7 +47,7 @@ int arch_serial_init(struct serial_device *dev)
 	/* no parity, normal mode */
 	UART->UART_MR = UART_MR_PAR_NO | UART_MR_CHMODE_NORMAL;
 
-	/* From Atmel Datasheet: baud rate = MCK / (REG_UART_BRGR * 16) */
+	/* From Atmel Datasheet: baud rate = MCK / (UART_BRGR * 16) */
 	UART->UART_BRGR = UART_BRGR_CD(( SystemCoreClock / (uint32_t)dev->baud ) >> 4);
 
 	/* choose the events we want an interrupt on */
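As a worked example of that divisor (assuming the SAM3X8E's usual 84 MHz core clock, which is not stated in this diff):

    /* dev->baud = 115200, SystemCoreClock = 84000000:                    */
    /*   CD = (84000000 / 115200) >> 4 = 729 >> 4 = 45                    */
    /*   actual rate = 84000000 / (45 * 16) = 116667 Bd (about 1.3 % fast) */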
@@ -81,49 +80,32 @@ void arch_serial_exit(struct serial_device *dev)
 ssize_t arch_serial_write(struct serial_device *dev, const void *buf, size_t len)
 {
-	int ret;
+	struct arch_serial_device *arch_dev = to_arch_serial_device(dev);
+
+	if (len > 0xffff)
+		return -E2BIG;
+
+	if (arch_dev->txbuf != NULL)
+		return -EBUSY;
 
 	struct dmabuf *dmabuf = dmabuf_create(&dev->device, len);
 	if (dmabuf == NULL)
 		return -ENOMEM;
 
 	memcpy(dmabuf->data, buf, len);
-	ret = serial_write_dma(dev, dmabuf);
-	dmabuf_put(dmabuf);
-	return ret;
-}
-
-ssize_t serial_write_dma(struct serial_device *dev, struct dmabuf *buf)
-{
-	uint16_t len;
-	struct arch_serial_device *arch_dev = to_arch_serial_device(dev);
-
-	if (arch_dev->tx_next != NULL)
-		return -EBUSY;
-
-	dmabuf_get(buf);
-
-	if (buf->len >= 0xffff)
-		len = 0xffff;
-	else
-		len = (uint16_t)buf->len;
-
-	if (arch_dev->tx_current == NULL) {
-		arch_dev->tx_current = buf;
-		UART->UART_TPR = (uint32_t)buf->data;
-		UART->UART_TCR = len;
-		/* we weren't transmitting, so the interrupt was masked */
-		UART->UART_IER = UART_IER_ENDTX;
-	} else {
-		arch_dev->tx_next = buf;
-		UART->UART_TNPR = (uint32_t)buf->data;
-		UART->UART_TNCR = len;
-	}
+	arch_dev->txbuf = dmabuf;
+	UART->UART_TPR = (uintptr_t)dmabuf->data;
+	UART->UART_TCR = dmabuf->len;
+	UART->UART_IER = UART_IER_TXBUFE;
 
 	return (ssize_t)len;
 }
 
 void irq_uart(void)
 {
+	__irq_enter();
+
 	uint8_t tmp;
 	uint32_t state = UART->UART_SR;
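The 0xffff ceiling comes from the PDC transfer counter registers (UART_TCR and friends), which are only 16 bits wide; instead of silently clamping the length the way the removed serial_write_dma() did, oversized requests are rejected outright. A hedged sketch of what a caller sees (hypothetical buffer names):

    ssize_t n = arch_serial_write(dev, msg, msg_len);
    if (n == -EBUSY) {
            /* a DMA transfer is already in flight; wait for DEVICE_KEVENT_TX */
    } else if (n == -E2BIG) {
            /* more than 0xffff bytes: split the write into smaller chunks */
    }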
@@ -133,32 +115,30 @@ void irq_uart(void)
 		ringbuf_write(arch_serial_default_device.device.rx, &tmp, sizeof(tmp));
 
 		device_kevent_create_and_dispatch(&serial_default_device->device,
-						  DEVICE_CHANNEL_IN);
+						  DEVICE_KEVENT_RX);
 	}
 
-	/* REG_UART_PDC_TCR has reached zero */
-	if (state & UART_SR_ENDTX) {
-		if (arch_serial_default_device.tx_current != NULL)
-			dmabuf_put(arch_serial_default_device.tx_current);
+	/* UART_TCR has reached zero */
+	if (state & UART_SR_TXBUFE) {
+		dmabuf_put(arch_serial_default_device.txbuf);
+		arch_serial_default_device.txbuf = NULL;
 
-		/* DMA automatically does this to the actual hardware registers */
-		arch_serial_default_device.tx_current = arch_serial_default_device.tx_next;
-		arch_serial_default_device.tx_next = NULL;
-
-		if (arch_serial_default_device.tx_current == NULL)
-			UART->UART_IDR = UART_IDR_ENDTX;
+		UART->UART_IDR = UART_IDR_TXBUFE;
 
 		device_kevent_create_and_dispatch(&serial_default_device->device,
-						  DEVICE_CHANNEL_OUT);
+						  DEVICE_KEVENT_TX);
 	}
 
 	/* check for error conditions */
 	if ((state & UART_SR_OVRE) || (state & UART_SR_FRAME)) {
 		/* TODO: write some proper error handling routines ffs */
 		UART->UART_CR = UART_CR_RSTSTA;
+		device_kevent_create_and_dispatch(
+			&serial_default_device->device,
+			DEVICE_KEVENT_RX | DEVICE_KEVENT_TX | DEVICE_KEVENT_ERR
+		);
 	}
 
-	__clrex();
+	__irq_leave();
 }
 
 /*
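Read together with arch_serial_write() above, the TX path is: the write stores the buffer in txbuf, loads UART_TPR/UART_TCR and unmasks TXBUFE; once the counter hits zero, this handler drops the dmabuf reference, clears txbuf, masks TXBUFE again and dispatches DEVICE_KEVENT_TX, which is what wakes a task parked in iowait_device() further down.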
@@ -26,14 +26,16 @@ struct device {
 extern struct kent *devices_kent;
 
-enum device_channel {
-	DEVICE_CHANNEL_IN,
-	DEVICE_CHANNEL_OUT,
+enum device_kevent_flags {
+	DEVICE_KEVENT_TX = (1 << 0),
+	DEVICE_KEVENT_RX = (1 << 1),
+	DEVICE_KEVENT_ERR = (1 << 2),
+	DEVICE_KEVENT_DMA = (1 << 3),
 };
 
 struct device_kevent {
 	struct kevent kevent;
-	enum device_channel channel;
+	enum device_kevent_flags flags;
 };
 
 __always_inline struct device_kevent *kevent_to_device_kevent(struct kevent *event)
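Because these are bit flags rather than an in/out channel enum, one event can carry several conditions at once and listeners match with a mask; that is what the error path in irq_uart() relies on when it dispatches DEVICE_KEVENT_RX | DEVICE_KEVENT_TX | DEVICE_KEVENT_ERR. A minimal sketch of the matching logic, mirroring the listener change further down:

    enum device_kevent_flags waiting_for = DEVICE_KEVENT_RX;
    enum device_kevent_flags dispatched  = DEVICE_KEVENT_RX | DEVICE_KEVENT_ERR;
    
    if ((dispatched & waiting_for) != 0) {
            /* the listener fires: at least one awaited condition is set */
    }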
@@ -53,7 +55,7 @@ __always_inline struct device *kevent_to_device(struct kevent *event)
 * @param channel Which channel (in or out) the event applies to
 * @returns The created event, or `NULL` if out of memory
 */
-struct device_kevent *device_kevent_create(struct device *device, enum device_channel channel);
+struct device_kevent *device_kevent_create(struct device *device, enum device_kevent_flags flags);
 
 /**
 * @brief Convenience wrapper for creating and immediately dispatching a device kevent.
@@ -61,7 +63,7 @@ struct device_kevent *device_kevent_create(struct device *device, enum device_ch
 * @param device Device the event refers to
 * @param channel Which channel (in or out) the event applies to
 */
-void device_kevent_create_and_dispatch(struct device *device, enum device_channel channel);
+void device_kevent_create_and_dispatch(struct device *device, enum device_kevent_flags flags);
 
 /** Initialize the devices subsystem. */
 int devices_init(void);
@@ -34,7 +34,6 @@ enum file_kevent_flags {
 	FILE_KEVENT_WRITE = (1 << 1),
 	FILE_KEVENT_CLOSE = (1 << 2),
 	FILE_KEVENT_EOF = (1 << 3),
-	FILE_KEVENT_UNLOCK = (1 << 4),
 };
 
 struct file_kevent {
@@ -12,11 +12,12 @@ struct list_head {
 	struct list_head *prev;
 };
 
-#define LIST_HEAD(name) \
-	struct list_head name = { \
-		.next = &name, \
-		.prev = &name, \
-	}
+#define LIST_HEAD_INIT(name) { \
+	.next = &(name), \
+	.prev = &(name), \
+}
+
+#define LIST_HEAD(name) struct list_head name = LIST_HEAD_INIT(name)
 
 __always_inline void list_init(struct list_head *head)
 {
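Splitting the initializer out of LIST_HEAD is what allows a list head to be initialized inside another static initializer, e.g. a list embedded in a struct, which the reworked MUTEX() macro below relies on. A small sketch with a hypothetical struct and variable name:

    struct workqueue {               /* hypothetical example type */
            int pending;
            struct list_head entries;
    };
    
    static struct workqueue wq = {
            .pending = 0,
            .entries = LIST_HEAD_INIT(wq.entries),  /* valid inside an initializer */
    };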
@@ -146,7 +146,11 @@ __always_inline bool mutex_is_locked(struct mutex *mutex)
 *
 * @param name Name of the `struct mutex` that will be defined
 */
-#define MUTEX(name) struct mutex name = { .lock = 0 }
+#define MUTEX(name) struct mutex name = { \
+	.lock = 0, \
+	.wait_queue_lock = { .lock = 0 }, \
+	.wait_queue = LIST_HEAD_INIT(name.wait_queue) \
+}
 
 /** @} */
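With the wait queue initialized as well, a statically defined mutex should be usable without a runtime init call. A hedged usage sketch (assuming the usual mutex_lock() counterpart to the mutex_unlock() calls seen in file.c below; the name is hypothetical):

    static MUTEX(example_lock);
    
    void example(void)
    {
            mutex_lock(&example_lock);
            /* ... critical section ... */
            mutex_unlock(&example_lock);
    }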
@@ -57,13 +57,13 @@ static void device_kevent_destroy(struct kent *kent)
 	free(device_kevent);
 }
 
-struct device_kevent *device_kevent_create(struct device *device, enum device_channel channel)
+struct device_kevent *device_kevent_create(struct device *device, enum device_kevent_flags flags)
 {
 	struct device_kevent *event = atomic_malloc(sizeof(*event));
 	if (event == NULL)
 		return NULL;
 
-	event->channel = channel;
+	event->flags = flags;
 	event->kevent.kind = KEVENT_DEVICE;
 
 	event->kevent.kent.parent = &device->kent;
@@ -77,9 +77,9 @@ struct device_kevent *device_kevent_create(struct device *device, enum device_ch
 	return event;
 }
 
-void device_kevent_create_and_dispatch(struct device *device, enum device_channel channel)
+void device_kevent_create_and_dispatch(struct device *device, enum device_kevent_flags flags)
 {
-	struct device_kevent *event = device_kevent_create(device, channel);
+	struct device_kevent *event = device_kevent_create(device, flags);
 	if (event != NULL)
 		kevent_dispatch(&event->kevent);
 }
@@ -86,7 +86,7 @@ void file_put(struct file *f)
 struct io_device_kevent_extra {
 	struct file *file;
 	struct task *task;
-	enum device_channel channel;
+	enum device_kevent_flags flags;
 };
 
 static int io_device_kevent_listener(struct kevent *event, void *_extra)
@@ -98,7 +98,7 @@ static int io_device_kevent_listener(struct kevent *event, void *_extra)
 		return KEVENT_CB_NONE;
 
 	struct device_kevent *device_kevent = kevent_to_device_kevent(event);
-	if (device_kevent->channel != extra->channel)
+	if ((device_kevent->flags & extra->flags) == 0)
 		return KEVENT_CB_NONE;
 
 	extra->task->state = TASK_QUEUE;
@@ -108,18 +108,19 @@ static int io_device_kevent_listener(struct kevent *event, void *_extra)
 	return KEVENT_CB_LISTENER_DEL | KEVENT_CB_STOP;
 }
 
-static int iowait_device(struct file *file, enum device_channel channel)
+static int iowait_device(struct file *file, enum device_kevent_flags flags)
 {
 	file_get(file->fd);
 	kent_get(&current->kent);
 
-	struct io_device_kevent_extra *extra = malloc(sizeof(*extra));
+	/* this must be atomic because event listeners can't sleep but need to call free() */
+	struct io_device_kevent_extra *extra = atomic_malloc(sizeof(*extra));
 	if (extra == NULL)
 		return -ENOMEM;
 
 	extra->file = file;
 	extra->task = current;
-	extra->channel = channel;
+	extra->flags = flags;
 
 	kevent_listener_add(KEVENT_DEVICE, io_device_kevent_listener, extra);
 	yield(TASK_IOWAIT);
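For reference, the flag travels the whole way: file_read() and file_write() pass the event they are waiting for (DEVICE_KEVENT_RX or DEVICE_KEVENT_TX), the listener keeps that mask in its extra block, and a matching kevent dispatched from irq_uart() re-queues the sleeping task. The extra block comes from atomic_malloc() because, as the comment notes, the listener that eventually frees it runs in a context that cannot sleep.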
@@ -139,7 +140,7 @@ ssize_t file_read(void *buf, struct file *file, size_t len)
 		ssize_t tmp = file->device->read(buf, file->device, len, file->pos);
 		if (tmp < 0) {
 			if (tmp == -EBUSY) {
-				tmp = iowait_device(file, DEVICE_CHANNEL_IN);
+				tmp = iowait_device(file, DEVICE_KEVENT_RX);
 			} else {
 				ret = tmp;
 				break;
@@ -154,7 +155,7 @@ ssize_t file_read(void *buf, struct file *file, size_t len)
 	}
 
 	mutex_unlock(&file->lock);
-	file_kevent_create_and_dispatch(file, FILE_KEVENT_READ | FILE_KEVENT_UNLOCK);
+	file_kevent_create_and_dispatch(file, FILE_KEVENT_READ);
 
 	return ret;
 }
@@ -172,7 +173,7 @@ ssize_t file_write(struct file *file, const void *buf, size_t len)
 		ssize_t tmp = file->device->write(file->device, buf, len, file->pos);
 		if (tmp < 0) {
 			if (tmp == -EBUSY) {
-				tmp = iowait_device(file, DEVICE_CHANNEL_OUT);
+				tmp = iowait_device(file, DEVICE_KEVENT_TX);
 				if (tmp < 0) {
 					ret = tmp;
 					break;
@@ -191,7 +192,7 @@ ssize_t file_write(struct file *file, const void *buf, size_t len)
 	}
 
 	mutex_unlock(&file->lock);
-	file_kevent_create_and_dispatch(file, FILE_KEVENT_WRITE | FILE_KEVENT_UNLOCK);
+	file_kevent_create_and_dispatch(file, FILE_KEVENT_WRITE);
 
 	return ret;
 }
@@ -237,8 +237,6 @@ void *atomic_malloc(size_t size)
 	size = round_alloc_size_up(size);
 
-	atomic_enter();
-
 	struct memblk *cursor;
 	list_for_each_entry(&atomic_heap, cursor, list) {
 		if (blk_get_size(cursor) >= size)
@@ -253,8 +251,6 @@ void *atomic_malloc(size_t size)
 		ptr = cursor->data;
 	}
 
-	atomic_leave();
-
 	return ptr;
 }
 