Commit 99640fa5 authored by A. Hahn

wishbone: added support for kernel 4.x

parent 372ebdde
@@ -43,7 +43,7 @@
 #define API 8
 #endif
-#if API <= 7
+#if API <= 8
 static const struct usb_device_id id_table[] = {
         { USB_DEVICE_AND_INTERFACE_INFO(0x1D50, 0x6062, 0xFF, 0xFF, 0xFF) },
...
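For context, the guard change re-enables this table on the kernel-4.x API level (API 8). The table is the standard USB match mechanism; a minimal sketch of how such a table is normally completed and registered, assuming the terminator and MODULE_DEVICE_TABLE line sit just below this hunk:

        static const struct usb_device_id id_table[] = {
                { USB_DEVICE_AND_INTERFACE_INFO(0x1D50, 0x6062, 0xFF, 0xFF, 0xFF) },
                { }                                 /* terminating entry */
        };
        MODULE_DEVICE_TABLE(usb, id_table);         /* enables hotplug autoload */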
@@ -28,7 +28,7 @@ static DEFINE_MUTEX(wishbone_mutex);
 static struct class *wishbone_master_class;
 static dev_t wishbone_master_dev_first;
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,30)
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,30) || LINUX_VERSION_CODE > KERNEL_VERSION(3,1,19)
 /* missing 'const' in 2.6.30. present in 2.6.31. */
 static int compat_memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
@@ -56,6 +56,7 @@ static int compat_memcpy_fromiovecend(unsigned char *kdata, const struct iovec *
 }
 /* does not exist in 2.6.30. does in 2.6.31. */
 static int compat_memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata,
         int offset, int len)
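These compat_* helpers are fallbacks for kernels that lack memcpy_fromiovecend()/memcpy_toiovecend(); the widened guard now also builds them on kernels newer than 3.1.19, since the stock helpers were later removed from the kernel entirely. Their bodies are elided from this diff; a sketch of the fromiovecend variant, modelled on the historical net/core/iovec.c implementation (an assumption, not this commit's exact code):

        static int compat_memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
                int offset, int len)
        {
                /* Skip over iovecs that lie entirely before 'offset' */
                while (offset >= iov->iov_len) {
                        offset -= iov->iov_len;
                        iov++;
                }
                while (len > 0) {
                        u8 __user *base = iov->iov_base + offset;
                        int copy = min_t(unsigned int, len, iov->iov_len - offset);

                        offset = 0;
                        if (copy_from_user(kdata, base, copy))
                                return -EFAULT;
                        len -= copy;
                        kdata += copy;
                        iov++;
                }
                return 0;
        }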
@@ -119,23 +120,23 @@ static void wishbone_dispatch_msi(struct work_struct *work)
         unsigned long flags;
         uint8_t *wptr;
         int index;
         wb = container_of(work, struct wishbone, msi_handler);
         context = 0;
         /* Hold this mutex for the whole handler */
         mutex_lock(&wb->msi_mutex);
         /* Hold this mutex while we look for stuff to deliver */
         mutex_lock(&wb->device_mutex);
         /* Don't process a second MSI while a previous is inflight */
         if (!wb->msi_pending) {
                 /* Process requests */
                 while (wb->wops->request(wb, &request)) {
                         /* The hardware should already have done this, but be safe */
                         request.addr &= wb->mask;
                         /* Find the context which receives this MSI */
                         index = request.addr / ((wb->mask/WISHBONE_MAX_MSI_OPEN)+1);
                         spin_lock_irqsave(&wb->msi_spinlock, flags);
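The division partitions the MSI address window evenly over the WISHBONE_MAX_MSI_OPEN context slots. A worked example with hypothetical numbers:

        /* Hypothetical: wb->mask = 0xffff, WISHBONE_MAX_MSI_OPEN = 4.
         * Stride = 0xffff/4 + 1 = 0x4000, so:
         *   addr 0x0000..0x3fff -> index 0
         *   addr 0x4000..0x7fff -> index 1, and so on.
         * The index is then used under the spinlock to find the context
         * registered in wb->msi_map[index]. */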
@@ -152,16 +153,16 @@ static void wishbone_dispatch_msi(struct work_struct *work)
                         }
                 }
         }
         mutex_unlock(&wb->device_mutex);
         /* Deliver the MSI */
         if (context) {
                 mutex_lock(&context->context_mutex);
                 /* Fill in the MSI data */
                 wptr = &context->msi[0];
                 wptr[0] = ETHERBONE_BCA;
                 wptr[1] = request.mask;
                 if (request.write) {
@@ -181,24 +182,24 @@ static void wishbone_dispatch_msi(struct work_struct *work)
                         eb_from_cpu(wptr, request.addr);
                         wptr += sizeof(wb_data_t);
                 }
                 wptr[0] = ETHERBONE_CYC | ETHERBONE_BCA | ETHERBONE_RCA;
                 wptr[1] = 0xf;
                 wptr[2] = 0;
                 wptr[3] = 1;
                 wptr += sizeof(wb_data_t);
                 eb_from_cpu(wptr, WBA_ERR);
                 wptr += sizeof(wb_data_t);
                 eb_from_cpu(wptr, 4); /* low bits of error status register */
                 wptr += sizeof(wb_data_t);
                 /* Mark the MSI pending */
                 context->msi_unread = wptr - &context->msi[0];
                 context->msi_pending = 1;
                 mutex_unlock(&context->context_mutex);
                 /* Wake-up any reader of the device */
                 wake_up_interruptible(&context->waitq);
                 kill_fasync(&context->fasync, SIGIO, POLL_IN);
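For orientation, the buffer assembled above is itself a small Etherbone message. Its approximate shape, one bracket per wb_data_t (fields from the elided write-record lines are marked '?', so treat this as a sketch):

        /*   [ BCA, mask, ?, ? ]          write record addressed to the context
         *   [ request.addr ]             write base address
         *   [ request.data ]             payload (write case; from the elided lines)
         *   [ CYC|BCA|RCA, 0xf, 0, 1 ]   record header: 0 writes, 1 read, end of cycle
         *   [ WBA_ERR ]                  BaseRetAddr: the answer lands in config register WBA_ERR
         *   [ 4 ]                        address to read: low word of the error status
         * The process reading the device executes this little Etherbone cycle; its
         * write-back to WBA_ERR is what handle_write_cfg() below turns into
         * wb->wops->reply(). */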
@@ -213,7 +214,7 @@ static void claim_msi(struct etherbone_master_context* context)
         unsigned long flags;
         unsigned i;
         struct wishbone *wb = context->wishbone;
         /* Safe to read msi_index here, because context_mutex held */
         if (context->msi_index != -1) return;
@@ -251,20 +252,20 @@ static wb_data_t handle_read_cfg(struct etherbone_master_context* context, wb_ad
 static void handle_write_cfg(struct etherbone_master_context* context, wb_addr_t addr, wb_data_t data)
 {
         struct wishbone *wb = context->wishbone;
         switch (addr) {
         case 36:
                 if (data == 1) {
                         claim_msi(context);
                 }
                 break;
         case WBA_DATA:
                 context->msi_data = data;
                 break;
         case WBA_ERR:
                 if (context->msi_pending) {
                         context->msi_pending = 0;
                         wb->msi_pending = 0;
                         wb->wops->reply(wb, data&1, context->msi_data);
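In other words, completing an MSI is a two-register handshake against this switch (sketch; WBA_DATA and WBA_ERR are the config-space offsets from wishbone.h):

        /* 1. write WBA_DATA <- read-back value    (latched into context->msi_data)
         * 2. write WBA_ERR  <- status word        (bit 0 = error flag; triggers
         *                                           wb->wops->reply(wb, data & 1,
         *                                           context->msi_data) and clears
         *                                           both msi_pending flags) */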
@@ -281,13 +282,13 @@ static void etherbone_master_process(struct etherbone_master_context* context)
         const struct wishbone_operations *wops;
         unsigned int size, left, i, record_len;
         unsigned char *buf;
         if (context->state == header) {
                 if (context->received < 8) {
                         /* no-op */
                         return;
                 }
                 context->buf[0] = 0x4E;
                 context->buf[1] = 0x6F;
                 context->buf[2] = 0x12; /* V.1 Probe-Response */
@@ -296,28 +297,28 @@ static void etherbone_master_process(struct etherbone_master_context* context)
                 context->processed = 8;
                 context->state = idle;
         }
         buf = &context->buf[0];
         wb = context->wishbone;
         wops = wb->wops;
         i = RING_INDEX(context->processed);
         size = RING_PROC_LEN(context);
         for (left = size; left >= 4; left -= record_len) {
                 unsigned char flags, be, wcount, rcount;
                 /* Determine record size */
                 flags = buf[i+0];
                 be = buf[i+1];
                 wcount = buf[i+2];
                 rcount = buf[i+3];
                 record_len = 1 + wcount + rcount + (wcount > 0) + (rcount > 0);
                 record_len *= sizeof(wb_data_t);
                 if (left < record_len) break;
                 /* Configure byte enable and raise cycle line */
                 if (context->state == idle) {
                         mutex_lock(&wb->device_mutex);
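A worked example of the record-size arithmetic, assuming a 32-bit bus (sizeof(wb_data_t) == 4):

        /* wcount = 2, rcount = 3:
         *   record_len = (1 + 2 + 3 + 1 + 1) * 4 = 32 bytes,
         * i.e. header word, write base address, two write values,
         * BaseRetAddr, three read addresses. The two conditional words
         * only exist when the corresponding count is non-zero. */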
@@ -332,15 +333,15 @@ static void etherbone_master_process(struct etherbone_master_context* context)
                         unsigned char j;
                         int wff = flags & ETHERBONE_WFF;
                         int wca = flags & ETHERBONE_WCA;
                         /* increment=0 if wff!=0 */
                         increment = sizeof(wb_data_t) * (1 - (wff / ETHERBONE_WFF));
                         /* Erase the header */
                         eb_from_cpu(buf+i, 0);
                         i = RING_INDEX(i + sizeof(wb_data_t));
                         base_address = eb_to_cpu(buf+i);
                         if (wca) {
                                 for (j = wcount; j > 0; --j) {
                                         eb_from_cpu(buf+i, 0);
@@ -357,21 +358,21 @@ static void etherbone_master_process(struct etherbone_master_context* context)
                                 }
                         }
                 }
                 buf[i+0] = (flags & ETHERBONE_CYC) |
                            (((flags & ETHERBONE_RFF) != 0) ? ETHERBONE_WFF : 0) |
                            (((flags & ETHERBONE_BCA) != 0) ? ETHERBONE_WCA : 0);
                 buf[i+1] = be;
                 buf[i+2] = rcount; /* rcount -> wcount */
                 buf[i+3] = 0;
                 if (rcount > 0) {
                         unsigned char j;
                         int rca = flags & ETHERBONE_RCA;
                         /* Move past header, and leave BaseRetAddr intact */
                         i = RING_INDEX(i + sizeof(wb_data_t) + sizeof(wb_data_t));
                         if (rca) {
                                 for (j = rcount; j > 0; --j) {
                                         eb_from_cpu(buf+i, handle_read_cfg(context, eb_to_cpu(buf+i)));
@@ -386,42 +387,42 @@ static void etherbone_master_process(struct etherbone_master_context* context)
                 } else {
                         i = RING_INDEX(i + sizeof(wb_data_t));
                 }
                 if ((flags & ETHERBONE_CYC) != 0) {
                         wops->cycle(wb, 0);
                         context->state = idle;
                         mutex_unlock(&wb->device_mutex);
                 }
         }
         context->processed = RING_POS(context->processed + size - left);
 }
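The RING_* helpers are defined elsewhere in wishbone.c and are not part of this diff; a minimal sketch of the usual pattern they follow, assuming a power-of-two buffer and free-running position counters (names match the driver, bodies are an assumption):

        #define RING_SIZE      8192                       /* hypothetical; a power of two */
        #define RING_INDEX(x)  ((x) & (RING_SIZE - 1))    /* byte offset into context->buf */
        #define RING_POS(x)    ((x) & (RING_SIZE*2 - 1))  /* free-running position counter */
        /* Positions wrap at 2*RING_SIZE so that a full ring (positions
         * RING_SIZE apart) remains distinguishable from an empty one
         * (equal positions). */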
 static int char_master_open(struct inode *inode, struct file *filep)
 {
         struct etherbone_master_context *context;
         context = kmalloc(sizeof(struct etherbone_master_context), GFP_KERNEL);
         if (!context) return -ENOMEM;
         context->wishbone = container_of(inode->i_cdev, struct wishbone, master_cdev);
         mutex_init(&context->context_mutex);
         context->state = header;
         context->sent = 0;
         context->processed = 0;
         context->received = 0;
         context->msi_unread = 0;
         context->msi_pending = 0;
         context->fasync = 0;
         init_waitqueue_head(&context->waitq);
         context->msi_index = -1;
         filep->private_data = context;
         return 0;
 }
@@ -430,7 +431,7 @@ static int char_master_release(struct inode *inode, struct file *filep)
         unsigned long flags;
         struct etherbone_master_context *context = filep->private_data;
         struct wishbone *wb = context->wishbone;
         /* Did the bad user forget to drop the cycle line? */
         mutex_lock(&context->context_mutex);
         if (context->state == cycle) {
@@ -439,7 +440,7 @@ static int char_master_release(struct inode *inode, struct file *filep)
                 mutex_unlock(&wb->device_mutex);
         }
         mutex_unlock(&context->context_mutex);
         /* Do not destroy ourselves while an MSI is inflight to us */
         mutex_lock(&wb->msi_mutex);
         spin_lock_irqsave(&wb->msi_spinlock, flags);
@@ -448,12 +449,12 @@ static int char_master_release(struct inode *inode, struct file *filep)
         context->msi_index = -1;
         spin_unlock_irqrestore(&wb->msi_spinlock, flags);
         mutex_unlock(&wb->msi_mutex);
         /* At this point, we know wishbone_dispatch_msi won't call into us */
         /* Furthermore, we have the last handle as it's being freed, so we
          * implicitly hold context_mutex (don't really hold it during kfree!)
          */
         /* Finish any unhandled MSI */
         if (context->msi_pending) {
                 mutex_lock(&wb->device_mutex);
@@ -481,13 +482,13 @@ static ssize_t char_master_aio_read(struct kiocb *iocb, const struct iovec *iov,
         struct file *filep = iocb->ki_filp;
         struct etherbone_master_context *context = filep->private_data;
         unsigned int len, iov_len, ring_len, buf_len;
         iov_len = iov_length(iov, nr_segs);
         if (unlikely(iov_len == 0)) return 0;
         if (mutex_lock_interruptible(&context->context_mutex))
                 return -EINTR;
         /* If MSI is pending, deliver it */
         if (deliver_msi(context)) {
                 /* We don't need a lock here, because no one will write to the msi_unread or
@@ -499,10 +500,10 @@ static ssize_t char_master_aio_read(struct kiocb *iocb, const struct iovec *iov,
         } else {
                 ring_len = RING_READ_LEN(context);
                 len = min_t(unsigned int, ring_len, iov_len);
                 /* How far till we must wrap? */
                 buf_len = sizeof(context->buf) - RING_INDEX(context->sent);
                 if (buf_len < len) {
                         memcpy_toiovecend(iov, RING_POINTER(context, sent), 0, buf_len);
                         memcpy_toiovecend(iov, &context->buf[0], buf_len, len-buf_len);
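A worked example of the wrap handling above, with hypothetical sizes:

        /* sizeof(context->buf) = 8192, RING_INDEX(context->sent) = 8000, len = 500:
         *   buf_len = 8192 - 8000 = 192 < 500, so the first call copies the last
         *   192 bytes of buf to the iovec at offset 0, and the second copies the
         *   remaining 500 - 192 = 308 bytes from &buf[0] at iovec offset 192. */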
@@ -511,37 +512,39 @@ static ssize_t char_master_aio_read(struct kiocb *iocb, const struct iovec *iov,
                 }
                 context->sent = RING_POS(context->sent + len);
         }
         mutex_unlock(&context->context_mutex);
         /* Wake-up polling descriptors */
         wake_up_interruptible(&context->waitq);
         kill_fasync(&context->fasync, SIGIO, POLL_OUT);
         if (len == 0 && (filep->f_flags & O_NONBLOCK) != 0)
                 return -EAGAIN;
         return len;
 }
 static ssize_t char_master_aio_write(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos)
 {
         struct file *filep = iocb->ki_filp;
         struct etherbone_master_context *context = filep->private_data;
         unsigned int len, iov_len, ring_len, buf_len;
         iov_len = iov_length(iov, nr_segs);
         if (unlikely(iov_len == 0)) return 0;
         if (mutex_lock_interruptible(&context->context_mutex))
                 return -EINTR;
         ring_len = RING_WRITE_LEN(context);
         len = min_t(unsigned int, ring_len, iov_len);
         /* How far till we must wrap? */
         buf_len = sizeof(context->buf) - RING_INDEX(context->received);
         if (buf_len < len) {
                 memcpy_fromiovecend(RING_POINTER(context, received), iov, 0, buf_len);
                 memcpy_fromiovecend(&context->buf[0], iov, buf_len, len-buf_len);
@@ -549,19 +552,19 @@ static ssize_t char_master_aio_write(struct kiocb *iocb, const struct iovec *iov
                 memcpy_fromiovecend(RING_POINTER(context, received), iov, 0, len);
         }
         context->received = RING_POS(context->received + len);
         /* Process buffers */
         etherbone_master_process(context);
         mutex_unlock(&context->context_mutex);
         /* Wake-up polling descriptors */
         wake_up_interruptible(&context->waitq);
         kill_fasync(&context->fasync, SIGIO, POLL_IN);
         if (len == 0 && (filep->f_flags & O_NONBLOCK) != 0)
                 return -EAGAIN;
         return len;
 }
@@ -569,17 +572,17 @@ static unsigned int char_master_poll(struct file *filep, poll_table *wait)
 {
         unsigned int mask = 0;
         struct etherbone_master_context *context = filep->private_data;
         poll_wait(filep, &context->waitq, wait);
         mutex_lock(&context->context_mutex);
         if (deliver_msi(context)) mask |= POLLIN | POLLRDNORM;
         if (RING_READ_LEN (context) != 0) mask |= POLLIN | POLLRDNORM;
         if (RING_WRITE_LEN(context) != 0) mask |= POLLOUT | POLLWRNORM;
         mutex_unlock(&context->context_mutex);
         return mask;
 }
@@ -591,6 +594,23 @@ static int char_master_fasync(int fd, struct file *file, int on)
         return fasync_helper(fd, file, on, &context->fasync);
 }
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(4,1,0)
+static ssize_t char_master_aio_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+        return char_master_aio_read(iocb, iter->iov, iter->nr_segs, iter->iov_offset);
+}
+
+static ssize_t char_master_aio_write_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+        return char_master_aio_write(iocb, iter->iov, iter->nr_segs, iter->iov_offset);
+}
+#endif
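These wrappers reach into iter->iov and iter->nr_segs directly, which is only valid while the iterator is a user-space ITER_IOVEC (true for plain read(2)/write(2) on this device). An alternative that stays inside the iov_iter API would look roughly like this; a sketch under the same context layout, not part of this commit (the MSI and O_NONBLOCK paths of char_master_aio_read are omitted):

        #include <linux/uio.h>

        static ssize_t char_master_read_iter_sketch(struct kiocb *iocb, struct iov_iter *to)
        {
                struct etherbone_master_context *context = iocb->ki_filp->private_data;
                size_t want, buf_len, done;

                if (mutex_lock_interruptible(&context->context_mutex))
                        return -EINTR;
                /* Copy up to the wrap point, then the remainder from the start */
                want = min_t(size_t, RING_READ_LEN(context), iov_iter_count(to));
                buf_len = sizeof(context->buf) - RING_INDEX(context->sent);
                if (buf_len < want) {
                        done  = copy_to_iter(RING_POINTER(context, sent), buf_len, to);
                        done += copy_to_iter(&context->buf[0], want - buf_len, to);
                } else {
                        done = copy_to_iter(RING_POINTER(context, sent), want, to);
                }
                context->sent = RING_POS(context->sent + done);
                mutex_unlock(&context->context_mutex);
                return done;
        }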
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(4,1,0)
 static const struct file_operations etherbone_master_fops = {
         .owner = THIS_MODULE,
         .llseek = no_llseek,
@@ -604,24 +624,44 @@ static const struct file_operations etherbone_master_fops = {
         .fasync = char_master_fasync,
 };
+#else
+static const struct file_operations etherbone_master_fops = {
+        .owner = THIS_MODULE,
+        .llseek = no_llseek,
+        //.read = new_sync_read,
+        .read_iter = char_master_aio_read_iter,
+        //.write = new_sync_write,
+        .write_iter = char_master_aio_write_iter,
+        .open = char_master_open,
+        .poll = char_master_poll,
+        .release = char_master_release,
+        .fasync = char_master_fasync,
+};
+#endif
+
+//ssize_t (*read_iter) (struct kiocb *, struct iov_iter *);
+//ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
 int wishbone_register(struct wishbone* wb)
 {
         struct list_head *list_pos;
         unsigned int devoff, i;
         char workqueue_name[40];
         mutex_init(&wb->device_mutex);
         mutex_init(&wb->msi_mutex);
         wb->msi_pending = 0;
         spin_lock_init(&wb->msi_spinlock);
         for (i = 0; i < WISHBONE_MAX_MSI_OPEN; ++i) {
                 wb->msi_map[i] = 0;
         }
         /* Grab mutex for insertion of device into global driver list */
         mutex_lock(&wishbone_mutex);
         /* Search the list for gaps, stopping past the gap.
          * If we overflow the list (ie: no gaps), minor already points past end.
          */
@@ -629,58 +669,58 @@ int wishbone_register(struct wishbone* wb)
         list_for_each(list_pos, &wishbone_list) {
                 struct wishbone *entry =
                         container_of(list_pos, struct wishbone, list);
                 dev_t master_dev_tmp =
                         MKDEV(
                                 MAJOR(wishbone_master_dev_first),
                                 MINOR(wishbone_master_dev_first) + devoff);
                 if (entry->master_dev != master_dev_tmp) {
                         /* We found a gap! */
                         break;
                 } else {
                         /* Run out of minors? */
                         if (devoff == max_devices-1) goto fail_out;
                         /* Try the next minor */
                         ++devoff;
                 }
         }
         /* Select the free device minor */
         wb->master_dev =
                 MKDEV(
                         MAJOR(wishbone_master_dev_first),
                         MINOR(wishbone_master_dev_first) + devoff);
         /* Connect the file operations with the cdev */
         cdev_init(&wb->master_cdev, &etherbone_master_fops);
         wb->master_cdev.owner = wb->wops->owner;
         if (cdev_add(&wb->master_cdev, wb->master_dev, 1)) goto fail_out;
         /* Create the sysfs entry */
         wb->master_device = device_create(wishbone_master_class, wb->parent, wb->master_dev, NULL, "wbm%d", devoff);
         if (IS_ERR(wb->master_device)) goto fail_master_cdev;
         /* Prepare the MSI dispatcher for being queued */
         INIT_WORK(&wb->msi_handler, &wishbone_dispatch_msi);
         /* Maybe for older kernels?: */
         /* INIT_WORK(&wb->msi_handler, &wishbone_dispatch_msi, &wb->msi_handler); */
         /* Create a workqueue for processing MSIs (in-order) */
         snprintf(workqueue_name, sizeof(workqueue_name), "wishbone/msi_wbm%d", devoff);
         wb->msi_workqueue = create_singlethread_workqueue(workqueue_name);
         if (!wb->msi_workqueue) goto fail_master_dev;
         /* Insert the device into the sorted list */
         INIT_LIST_HEAD(&wb->list);
         list_add_tail(&wb->list, list_pos);
         mutex_unlock(&wishbone_mutex);
         /* Startup the MSI queue */
         wishbone_slave_ready(wb);
         return 0;
 fail_master_dev:
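The gap search and the final list_add_tail() keep the global device list sorted by minor. A worked example with a hypothetical starting state:

        /* Already-registered minors: 0, 1, 3.  The walk compares each entry with
         * the minor it would occupy (devoff): entry 0 == 0, entry 1 == 1,
         * entry 3 != 2 -> break with devoff = 2 and list_pos at the minor-3 entry.
         * list_add_tail(&wb->list, list_pos) inserts the new device *before*
         * that entry, so the list stays sorted and the hole becomes /dev/wbm2. */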
@@ -696,17 +736,17 @@ int wishbone_unregister(struct wishbone* wb)
 {
         if (WARN_ON(list_empty(&wb->list)))
                 return -EINVAL;
         mutex_lock(&wishbone_mutex);
         list_del(&wb->list);
         flush_workqueue(wb->msi_workqueue);
         destroy_workqueue(wb->msi_workqueue);
         device_destroy(wishbone_master_class, wb->master_dev);
         cdev_del(&wb->master_cdev);
         mutex_unlock(&wishbone_mutex);
         return 0;
 }
@@ -721,24 +761,24 @@ static int __init wishbone_init(void)
         dev_t overflow;
         printk(KERN_NOTICE "wishbone: version " __stringify(GIT_REVISION) " loaded\n");
         overflow = MKDEV(0, max_devices-1);
         if (MINOR(overflow) != max_devices-1) {
                 err = -ENOMEM;
                 goto fail_last;
         }
         wishbone_master_class = class_create(THIS_MODULE, "wbm");
         if (IS_ERR(wishbone_master_class)) {
                 err = PTR_ERR(wishbone_master_class);
                 goto fail_last;
         }
         if (alloc_chrdev_region(&wishbone_master_dev_first, 0, max_devices, "wbm") < 0) {
                 err = -EIO;
                 goto fail_master_class;
         }
         return 0;
 fail_master_class:
...