// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/drivers/misc/xillybus_core.c
 *
 * Copyright 2011 Xillybus Ltd, http://xillybus.com
 *
 * Driver for the Xillybus FPGA/host framework.
 *
 * This driver interfaces with a special IP core in an FPGA, setting up
 * a pipe between a hardware FIFO in the programmable logic and a device
 * file in the host. The number of such pipes and their attributes are
 * set up on the logic. This driver detects these automatically and
 * creates the device files accordingly.
 */
#include <linux/list.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/crc32.h>
#include <linux/poll.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include "xillybus.h"
#include "xillybus_class.h"

MODULE_DESCRIPTION("Xillybus core functions");
MODULE_AUTHOR("Eli Billauer, Xillybus Ltd.");
MODULE_ALIAS("xillybus_core");
MODULE_LICENSE("GPL v2");

/* General timeout is 100 ms, rx timeout is 10 ms */
#define XILLY_RX_TIMEOUT (10*HZ/1000)
#define XILLY_TIMEOUT (100*HZ/1000)

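/*
 * Register offsets (in bytes) within the FPGA's register space, as
 * accessed through endpoint->registers with iowrite32(). The names
 * reflect how each register is used in this file.
 */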
#define fpga_msg_ctrl_reg 0x0008
#define fpga_dma_control_reg 0x0020
#define fpga_dma_bufno_reg 0x0024
#define fpga_dma_bufaddr_lowaddr_reg 0x0028
#define fpga_dma_bufaddr_highaddr_reg 0x002c
#define fpga_buf_ctrl_reg 0x0030
#define fpga_buf_offset_reg 0x0034
#define fpga_endian_reg 0x0040

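/*
 * Opcodes of messages arriving from the FPGA in the message buffer,
 * as dispatched by xillybus_isr() below.
 */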
#define XILLYMSG_OPCODE_RELEASEBUF 1
#define XILLYMSG_OPCODE_QUIESCEACK 2
#define XILLYMSG_OPCODE_FIFOEOF 3
#define XILLYMSG_OPCODE_FATAL_ERROR 4
#define XILLYMSG_OPCODE_NONEMPTY 5

static const char xillyname[] = "xillybus";

static struct workqueue_struct *xillybus_wq;

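/*
 * Locking summary, as inferred from the code below: the per-channel
 * rd/wr mutexes serialize invocations of the character device methods.
 * The rd/wr spinlocks protect the buffer-index and state flags that
 * the interrupt handler updates, and are held only for brief memory
 * manipulations; no mutex is taken while a spinlock is held. The
 * endpoint's register_mutex serializes non-atomic register sequences
 * (an offset write followed by a control write).
 */
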
static void malformed_message(struct xilly_endpoint *endpoint, u32 *buf)
{
	int opcode;
	int msg_channel, msg_bufno, msg_data, msg_dir;

	opcode = (buf[0] >> 24) & 0xff;
	msg_dir = buf[0] & 1;
	msg_channel = (buf[0] >> 1) & 0x7ff;
	msg_bufno = (buf[0] >> 12) & 0x3ff;
	msg_data = buf[1] & 0xfffffff;

	dev_warn(endpoint->dev,
		 "Malformed message (skipping): opcode=%d, channel=%03x, dir=%d, bufno=%03x, data=%07x\n",
		 opcode, msg_channel, msg_dir, msg_bufno, msg_data);
}

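/*
 * xillybus_isr() scans the message buffer, which the FPGA fills with
 * pairs of 32-bit words: the first word carries the opcode, channel
 * number, direction and buffer number; the second carries the data
 * payload and, in bits 31:28, a sequence counter that must match
 * ep->msg_counter. Bit 22 of the first word marks the last message.
 * Writing 0x01 to fpga_msg_ctrl_reg NACKs the buffer (requesting a
 * resend), and 0x03 ACKs it, allowing the FPGA to reuse the buffer.
 */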
irqreturn_t xillybus_isr(int irq, void *data)
{
	struct xilly_endpoint *ep = data;
	u32 *buf;
	unsigned int buf_size;
	int i;
	int opcode;
	unsigned int msg_channel, msg_bufno, msg_data, msg_dir;
	struct xilly_channel *channel;

	buf = ep->msgbuf_addr;
	buf_size = ep->msg_buf_size/sizeof(u32);

	dma_sync_single_for_cpu(ep->dev, ep->msgbuf_dma_addr,
				ep->msg_buf_size, DMA_FROM_DEVICE);

	for (i = 0; i < buf_size; i += 2) {
		if (((buf[i+1] >> 28) & 0xf) != ep->msg_counter) {
			malformed_message(ep, &buf[i]);
			dev_warn(ep->dev,
				 "Sending a NACK on counter %x (instead of %x) on entry %d\n",
				 ((buf[i+1] >> 28) & 0xf),
				 ep->msg_counter,
				 i/2);

			if (++ep->failed_messages > 10) {
				dev_err(ep->dev,
					"Lost sync with interrupt messages. Stopping.\n");
			} else {
				dma_sync_single_for_device(ep->dev,
							   ep->msgbuf_dma_addr,
							   ep->msg_buf_size,
							   DMA_FROM_DEVICE);

				iowrite32(0x01, /* Message NACK */
					  ep->registers + fpga_msg_ctrl_reg);
			}
			return IRQ_HANDLED;
		} else if (buf[i] & (1 << 22)) /* Last message */
			break;
	}

	if (i >= buf_size) {
		dev_err(ep->dev, "Bad interrupt message. Stopping.\n");
		return IRQ_HANDLED;
	}

	buf_size = i + 2;

	for (i = 0; i < buf_size; i += 2) { /* Scan through messages */
		opcode = (buf[i] >> 24) & 0xff;

		msg_dir = buf[i] & 1;
		msg_channel = (buf[i] >> 1) & 0x7ff;
		msg_bufno = (buf[i] >> 12) & 0x3ff;
		msg_data = buf[i+1] & 0xfffffff;

		switch (opcode) {
		case XILLYMSG_OPCODE_RELEASEBUF:
			if ((msg_channel > ep->num_channels) ||
			    (msg_channel == 0)) {
				malformed_message(ep, &buf[i]);
				break;
			}

			channel = ep->channels[msg_channel];

			if (msg_dir) { /* Write channel */
				if (msg_bufno >= channel->num_wr_buffers) {
					malformed_message(ep, &buf[i]);
					break;
				}
				spin_lock(&channel->wr_spinlock);
				channel->wr_buffers[msg_bufno]->end_offset =
					msg_data;
				channel->wr_fpga_buf_idx = msg_bufno;
				channel->wr_empty = 0;
				channel->wr_sleepy = 0;
				spin_unlock(&channel->wr_spinlock);

				wake_up_interruptible(&channel->wr_wait);

			} else {
				/* Read channel */

				if (msg_bufno >= channel->num_rd_buffers) {
					malformed_message(ep, &buf[i]);
					break;
				}

				spin_lock(&channel->rd_spinlock);
				channel->rd_fpga_buf_idx = msg_bufno;
				channel->rd_full = 0;
				spin_unlock(&channel->rd_spinlock);

				wake_up_interruptible(&channel->rd_wait);
				if (!channel->rd_synchronous)
					queue_delayed_work(
						xillybus_wq,
						&channel->rd_workitem,
						XILLY_RX_TIMEOUT);
			}

			break;
		case XILLYMSG_OPCODE_NONEMPTY:
			if ((msg_channel > ep->num_channels) ||
			    (msg_channel == 0) || (!msg_dir) ||
			    !ep->channels[msg_channel]->wr_supports_nonempty) {
				malformed_message(ep, &buf[i]);
				break;
			}

			channel = ep->channels[msg_channel];

			if (msg_bufno >= channel->num_wr_buffers) {
				malformed_message(ep, &buf[i]);
				break;
			}
			spin_lock(&channel->wr_spinlock);
			if (msg_bufno == channel->wr_host_buf_idx)
				channel->wr_ready = 1;
			spin_unlock(&channel->wr_spinlock);

			wake_up_interruptible(&channel->wr_ready_wait);

			break;
		case XILLYMSG_OPCODE_QUIESCEACK:
			ep->idtlen = msg_data;
			wake_up_interruptible(&ep->ep_wait);

			break;
		case XILLYMSG_OPCODE_FIFOEOF:
			if ((msg_channel > ep->num_channels) ||
			    (msg_channel == 0) || (!msg_dir) ||
			    !ep->channels[msg_channel]->num_wr_buffers) {
				malformed_message(ep, &buf[i]);
				break;
			}
			channel = ep->channels[msg_channel];
			spin_lock(&channel->wr_spinlock);
			channel->wr_eof = msg_bufno;
			channel->wr_sleepy = 0;

			channel->wr_hangup = channel->wr_empty &&
				(channel->wr_host_buf_idx == msg_bufno);

			spin_unlock(&channel->wr_spinlock);

			wake_up_interruptible(&channel->wr_wait);

			break;
		case XILLYMSG_OPCODE_FATAL_ERROR:
			ep->fatal_error = 1;
			wake_up_interruptible(&ep->ep_wait); /* For select() */
			dev_err(ep->dev,
				"FPGA reported a fatal error. This means that the low-level communication with the device has failed. This hardware problem is most likely unrelated to Xillybus (neither kernel module nor FPGA core), but reports are still welcome. All I/O is aborted.\n");
			break;
		default:
			malformed_message(ep, &buf[i]);
			break;
		}
	}

	dma_sync_single_for_device(ep->dev, ep->msgbuf_dma_addr,
				   ep->msg_buf_size, DMA_FROM_DEVICE);

	ep->msg_counter = (ep->msg_counter + 1) & 0xf;
	ep->failed_messages = 0;
	iowrite32(0x03, ep->registers + fpga_msg_ctrl_reg); /* Message ACK */

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(xillybus_isr);

/*
 * A few trivial memory management functions.
 * NOTE: These functions are used only on probe and remove, and therefore
 * no locks are applied!
 */

static void xillybus_autoflush(struct work_struct *work);

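/*
 * The buffer allocation below carves DMA buffers out of larger,
 * power-of-two page allocations ("salamis"): each call slices
 * bytebufsize bytes off the current salami, and a new salami is
 * allocated when the current one runs out.
 */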
struct xilly_alloc_state {
	void *salami;
	int left_of_salami;
	int nbuffer;
	enum dma_data_direction direction;
	u32 regdirection;
};

static void xilly_unmap(void *ptr)
{
	struct xilly_mapping *data = ptr;

	dma_unmap_single(data->device, data->dma_addr,
			 data->size, data->direction);

	kfree(ptr);
}

static int xilly_map_single(struct xilly_endpoint *ep,
			    void *ptr,
			    size_t size,
			    int direction,
			    dma_addr_t *ret_dma_handle)
{
	dma_addr_t addr;
	struct xilly_mapping *this;

	this = kzalloc(sizeof(*this), GFP_KERNEL);
	if (!this)
		return -ENOMEM;

	addr = dma_map_single(ep->dev, ptr, size, direction);

	if (dma_mapping_error(ep->dev, addr)) {
		kfree(this);
		return -ENODEV;
	}

	this->device = ep->dev;
	this->dma_addr = addr;
	this->size = size;
	this->direction = direction;

	*ret_dma_handle = addr;

	return devm_add_action_or_reset(ep->dev, xilly_unmap, this);
}

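/*
 * xilly_get_dma_buffers() allocates and maps @bufnum buffers of
 * @bytebufsize bytes each, announcing each buffer's DMA address to the
 * FPGA through the bufaddr/bufno registers. With @buffers == NULL, the
 * single buffer becomes the endpoint's message buffer instead.
 */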
static int xilly_get_dma_buffers(struct xilly_endpoint *ep,
				 struct xilly_alloc_state *s,
				 struct xilly_buffer **buffers,
				 int bufnum, int bytebufsize)
{
	int i, rc;
	dma_addr_t dma_addr;
	struct device *dev = ep->dev;
	struct xilly_buffer *this_buffer = NULL; /* Init to silence warning */

	if (buffers) { /* Not the message buffer */
		this_buffer = devm_kcalloc(dev, bufnum,
					   sizeof(struct xilly_buffer),
					   GFP_KERNEL);
		if (!this_buffer)
			return -ENOMEM;
	}

	for (i = 0; i < bufnum; i++) {
		/*
		 * Buffers are expected in descending size order, so there
		 * is either enough room for this buffer in the current
		 * salami, or none at all: a partial leftover means the
		 * IDT is corrupt.
		 */
		if ((s->left_of_salami < bytebufsize) &&
		    (s->left_of_salami > 0)) {
			dev_err(ep->dev,
				"Corrupt buffer allocation in IDT. Aborting.\n");
			return -ENODEV;
		}

		if (s->left_of_salami == 0) {
			int allocorder, allocsize;

			allocsize = PAGE_SIZE;
			allocorder = 0;
			while (bytebufsize > allocsize) {
				allocsize *= 2;
				allocorder++;
			}

			s->salami = (void *) devm_get_free_pages(
				dev,
				GFP_KERNEL | __GFP_DMA32 | __GFP_ZERO,
				allocorder);
			if (!s->salami)
				return -ENOMEM;

			s->left_of_salami = allocsize;
		}

		rc = xilly_map_single(ep, s->salami,
				      bytebufsize, s->direction,
				      &dma_addr);
		if (rc)
			return rc;

		iowrite32((u32) (dma_addr & 0xffffffff),
			  ep->registers + fpga_dma_bufaddr_lowaddr_reg);
		iowrite32(((u32) ((((u64) dma_addr) >> 32) & 0xffffffff)),
			  ep->registers + fpga_dma_bufaddr_highaddr_reg);

		if (buffers) { /* Not the message buffer */
			this_buffer->addr = s->salami;
			this_buffer->dma_addr = dma_addr;
			buffers[i] = this_buffer++;

			iowrite32(s->regdirection | s->nbuffer++,
				  ep->registers + fpga_dma_bufno_reg);
		} else {
			ep->msgbuf_addr = s->salami;
			ep->msgbuf_dma_addr = dma_addr;
			ep->msg_buf_size = bytebufsize;

			iowrite32(s->regdirection,
				  ep->registers + fpga_dma_bufno_reg);
		}

		s->left_of_salami -= bytebufsize;
		s->salami += bytebufsize;
	}
	return 0;
}

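/*
 * xilly_setupchannels() initializes all channel structures with
 * defaults and then walks the IDT's channel table, allocating DMA
 * buffers as dictated by each 4-byte entry.
 */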
static int xilly_setupchannels(struct xilly_endpoint *ep,
			       unsigned char *chandesc,
			       int entries)
{
	struct device *dev = ep->dev;
	int i, entry, rc;
	struct xilly_channel *channel;
	int channelnum, bufnum, bufsize, format, is_writebuf;
	int bytebufsize;
	int synchronous, allowpartial, exclusive_open, seekable;
	int supports_nonempty;
	int msg_buf_done = 0;

	struct xilly_alloc_state rd_alloc = {
		.salami = NULL,
		.left_of_salami = 0,
		.nbuffer = 1,
		.direction = DMA_TO_DEVICE,
		.regdirection = 0,
	};

	struct xilly_alloc_state wr_alloc = {
		.salami = NULL,
		.left_of_salami = 0,
		.nbuffer = 1,
		.direction = DMA_FROM_DEVICE,
		.regdirection = 0x80000000,
	};

	channel = devm_kcalloc(dev, ep->num_channels,
			       sizeof(struct xilly_channel), GFP_KERNEL);
	if (!channel)
		return -ENOMEM;

	ep->channels = devm_kcalloc(dev, ep->num_channels + 1,
				    sizeof(struct xilly_channel *),
				    GFP_KERNEL);
	if (!ep->channels)
		return -ENOMEM;

	ep->channels[0] = NULL; /* Channel 0 is message buf. */

	/* Initialize all channels with defaults */

	for (i = 1; i <= ep->num_channels; i++) {
		channel->wr_buffers = NULL;
		channel->rd_buffers = NULL;
		channel->num_wr_buffers = 0;
		channel->num_rd_buffers = 0;
		channel->wr_fpga_buf_idx = -1;
		channel->wr_host_buf_idx = 0;
		channel->wr_host_buf_pos = 0;
		channel->wr_empty = 1;
		channel->wr_ready = 0;
		channel->wr_sleepy = 1;
		channel->rd_fpga_buf_idx = 0;
		channel->rd_host_buf_idx = 0;
		channel->rd_host_buf_pos = 0;
		channel->rd_full = 0;
		channel->wr_ref_count = 0;
		channel->rd_ref_count = 0;

		spin_lock_init(&channel->wr_spinlock);
		spin_lock_init(&channel->rd_spinlock);
		mutex_init(&channel->wr_mutex);
		mutex_init(&channel->rd_mutex);
		init_waitqueue_head(&channel->rd_wait);
		init_waitqueue_head(&channel->wr_wait);
		init_waitqueue_head(&channel->wr_ready_wait);

		INIT_DELAYED_WORK(&channel->rd_workitem, xillybus_autoflush);

		channel->endpoint = ep;
		channel->chan_num = i;

		channel->log2_element_size = 0;

		ep->channels[i] = channel++;
	}

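	/*
	 * Each table entry is 4 bytes, decoded as follows (bit layout
	 * taken from the extraction code below):
	 *
	 * chandesc[0]: bit 0 = is_writebuf, bits 7:1 = channel number (low)
	 * chandesc[1]: bits 3:0 = channel number (high), bits 5:4 = format
	 *              (log2 of the element size, capped at 2),
	 *              bit 6 = allow partial, bit 7 = synchronous
	 * chandesc[2]: bits 4:0 = log2(buffer size in elements),
	 *              bit 5 = supports nonempty, bit 6 = seekable,
	 *              bit 7 = exclusive open
	 * chandesc[3]: bits 3:0 = log2(number of buffers)
	 *
	 * For example, a hypothetical entry of {0x03, 0x90, 0x0a, 0x05}
	 * would describe write channel 1, synchronous, 16-bit elements,
	 * with 32 buffers of 1024 elements (2048 bytes) each.
	 */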
	for (entry = 0; entry < entries; entry++, chandesc += 4) {
		struct xilly_buffer **buffers = NULL;

		is_writebuf = chandesc[0] & 0x01;
		channelnum = (chandesc[0] >> 1) | ((chandesc[1] & 0x0f) << 7);
		format = (chandesc[1] >> 4) & 0x03;
		allowpartial = (chandesc[1] >> 6) & 0x01;
		synchronous = (chandesc[1] >> 7) & 0x01;
		bufsize = 1 << (chandesc[2] & 0x1f);
		bufnum = 1 << (chandesc[3] & 0x0f);
		exclusive_open = (chandesc[2] >> 7) & 0x01;
		seekable = (chandesc[2] >> 6) & 0x01;
		supports_nonempty = (chandesc[2] >> 5) & 0x01;

		if ((channelnum > ep->num_channels) ||
		    ((channelnum == 0) && !is_writebuf)) {
			dev_err(ep->dev,
				"IDT requests channel out of range. Aborting.\n");
			return -ENODEV;
		}

		channel = ep->channels[channelnum]; /* NULL for msg channel */

		if (!is_writebuf || channelnum > 0) {
			channel->log2_element_size = ((format > 2) ?
						      2 : format);

			bytebufsize = bufsize *
				(1 << channel->log2_element_size);

			buffers = devm_kcalloc(dev, bufnum,
					       sizeof(struct xilly_buffer *),
					       GFP_KERNEL);
			if (!buffers)
				return -ENOMEM;
		} else {
			bytebufsize = bufsize << 2;
		}

		if (!is_writebuf) {
			channel->num_rd_buffers = bufnum;
			channel->rd_buf_size = bytebufsize;
			channel->rd_allow_partial = allowpartial;
			channel->rd_synchronous = synchronous;
			channel->rd_exclusive_open = exclusive_open;
			channel->seekable = seekable;

			channel->rd_buffers = buffers;
			rc = xilly_get_dma_buffers(ep, &rd_alloc, buffers,
						   bufnum, bytebufsize);
		} else if (channelnum > 0) {
			channel->num_wr_buffers = bufnum;
			channel->wr_buf_size = bytebufsize;

			channel->seekable = seekable;
			channel->wr_supports_nonempty = supports_nonempty;

			channel->wr_allow_partial = allowpartial;
			channel->wr_synchronous = synchronous;
			channel->wr_exclusive_open = exclusive_open;

			channel->wr_buffers = buffers;
			rc = xilly_get_dma_buffers(ep, &wr_alloc, buffers,
						   bufnum, bytebufsize);
		} else {
			rc = xilly_get_dma_buffers(ep, &wr_alloc, NULL,
						   bufnum, bytebufsize);
			msg_buf_done++;
		}

		if (rc)
			return -ENOMEM;
	}

	if (!msg_buf_done) {
		dev_err(ep->dev,
			"Corrupt IDT: No message buffer. Aborting.\n");
		return -ENODEV;
	}
	return 0;
}

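/*
 * The IDT is a data structure sent by the FPGA: byte 0 is a format
 * version, followed by a list of NUL-terminated device-file names
 * (ending with an empty string), the channel table parsed above, and
 * a closing 32-bit CRC, which xilly_obtain_idt() verifies over the
 * whole IDT.
 */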
static int xilly_scan_idt(struct xilly_endpoint *endpoint,
			  struct xilly_idt_handle *idt_handle)
{
	int count = 0;
	unsigned char *idt = endpoint->channels[1]->wr_buffers[0]->addr;
	unsigned char *end_of_idt = idt + endpoint->idtlen - 4;
	unsigned char *scan;
	int len;

	scan = idt + 1;
	idt_handle->names = scan;

	while ((scan <= end_of_idt) && *scan) {
		while ((scan <= end_of_idt) && *scan++)
			; /* Do nothing, just scan thru string */
		count++;
	}

	idt_handle->names_len = scan - idt_handle->names;

	scan++;

	if (scan > end_of_idt) {
		dev_err(endpoint->dev,
			"IDT device name list overflow. Aborting.\n");
		return -ENODEV;
	}
	idt_handle->chandesc = scan;

	len = endpoint->idtlen - (3 + ((int) (scan - idt)));

	if (len & 0x03) {
		dev_err(endpoint->dev,
			"Corrupt IDT device name list. Aborting.\n");
		return -ENODEV;
	}

	idt_handle->entries = len >> 2;
	endpoint->num_channels = count;

	return 0;
}

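/*
 * xilly_obtain_idt() asks the FPGA (opcode 3 on channel 0's control
 * register) to send the IDT into channel 1's first buffer, waits for
 * it to arrive, and then checks its length, CRC and format version.
 */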
static int xilly_obtain_idt(struct xilly_endpoint *endpoint)
{
	struct xilly_channel *channel;
	unsigned char *version;
	long t;

	channel = endpoint->channels[1]; /* This should be generated ad-hoc */

	channel->wr_sleepy = 1;

	iowrite32(1 |
		  (3 << 24), /* Opcode 3 for channel 0 */
		  endpoint->registers + fpga_buf_ctrl_reg);

	t = wait_event_interruptible_timeout(channel->wr_wait,
					     (!channel->wr_sleepy),
					     XILLY_TIMEOUT);

	if (t <= 0) {
		dev_err(endpoint->dev, "Failed to obtain IDT. Aborting.\n");

		if (endpoint->fatal_error)
			return -EIO;

		return -ENODEV;
	}

	dma_sync_single_for_cpu(channel->endpoint->dev,
				channel->wr_buffers[0]->dma_addr,
				channel->wr_buf_size,
				DMA_FROM_DEVICE);

	if (channel->wr_buffers[0]->end_offset != endpoint->idtlen) {
		dev_err(endpoint->dev,
			"IDT length mismatch (%d != %d). Aborting.\n",
			channel->wr_buffers[0]->end_offset, endpoint->idtlen);
		return -ENODEV;
	}

	if (crc32_le(~0, channel->wr_buffers[0]->addr,
		     endpoint->idtlen+1) != 0) {
		dev_err(endpoint->dev, "IDT failed CRC check. Aborting.\n");
		return -ENODEV;
	}

	version = channel->wr_buffers[0]->addr;

	/* Check version number. Reject anything above 0x82. */
	if (*version > 0x82) {
		dev_err(endpoint->dev,
			"No support for IDT version 0x%02x. Maybe the xillybus driver needs an upgrade. Aborting.\n",
			*version);
		return -ENODEV;
	}

	return 0;
}

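/*
 * xillybus_read() loops, copying data from whatever DMA buffers the
 * FPGA has released, and blocks (or returns early, depending on the
 * channel's attributes and O_NONBLOCK) when no data is available.
 */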
static ssize_t xillybus_read(struct file *filp, char __user *userbuf,
			     size_t count, loff_t *f_pos)
{
	ssize_t rc;
	unsigned long flags;
	int bytes_done = 0;
	int no_time_left = 0;
	long deadline, left_to_sleep;
	struct xilly_channel *channel = filp->private_data;

	int empty, reached_eof, exhausted, ready;
	/* Initializations are there only to silence warnings */

	int howmany = 0, bufpos = 0, bufidx = 0, bufferdone = 0;
	int waiting_bufidx;

	if (channel->endpoint->fatal_error)
		return -EIO;

	deadline = jiffies + 1 + XILLY_RX_TIMEOUT;

	rc = mutex_lock_interruptible(&channel->wr_mutex);
	if (rc)
		return rc;

	while (1) { /* Note that we may drop mutex within this loop */
		int bytes_to_do = count - bytes_done;

		spin_lock_irqsave(&channel->wr_spinlock, flags);

		empty = channel->wr_empty;
		ready = !empty || channel->wr_ready;

		if (!empty) {
			bufidx = channel->wr_host_buf_idx;
			bufpos = channel->wr_host_buf_pos;
			howmany = ((channel->wr_buffers[bufidx]->end_offset
				    + 1) << channel->log2_element_size)
				- bufpos;

			/* Update wr_host_* to its post-operation state */
			if (howmany > bytes_to_do) {
				bufferdone = 0;

				howmany = bytes_to_do;
				channel->wr_host_buf_pos += howmany;
			} else {
				bufferdone = 1;

				channel->wr_host_buf_pos = 0;

				if (bufidx == channel->wr_fpga_buf_idx) {
					channel->wr_empty = 1;
					channel->wr_sleepy = 1;
					channel->wr_ready = 0;
				}

				if (bufidx >= (channel->num_wr_buffers - 1))
					channel->wr_host_buf_idx = 0;
				else
					channel->wr_host_buf_idx++;
			}
		}

		/*
		 * Marking our situation after the possible changes above,
		 * for use after releasing the spinlock.
		 *
		 * empty = empty before change
		 * exhausted = empty after possible change
		 */

		reached_eof = channel->wr_empty &&
			(channel->wr_host_buf_idx == channel->wr_eof);
		channel->wr_hangup = reached_eof;
		exhausted = channel->wr_empty;
		waiting_bufidx = channel->wr_host_buf_idx;

		spin_unlock_irqrestore(&channel->wr_spinlock, flags);

		if (!empty) { /* Go on, now without the spinlock */

			if (bufpos == 0) /* Position zero means it's virgin */
				dma_sync_single_for_cpu(channel->endpoint->dev,
							channel->wr_buffers[bufidx]->dma_addr,
							channel->wr_buf_size,
							DMA_FROM_DEVICE);

			if (copy_to_user(
				    userbuf,
				    channel->wr_buffers[bufidx]->addr
				    + bufpos, howmany))
				rc = -EFAULT;

			userbuf += howmany;
			bytes_done += howmany;

			if (bufferdone) {
				dma_sync_single_for_device(channel->endpoint->dev,
							   channel->wr_buffers[bufidx]->dma_addr,
							   channel->wr_buf_size,
							   DMA_FROM_DEVICE);

				/*
				 * Tell FPGA the buffer is done with. It's an
				 * atomic operation to the FPGA, so what
				 * happens with other channels doesn't matter,
				 * and the certain channel is protected with
				 * the channel-specific mutex.
				 */

				iowrite32(1 | (channel->chan_num << 1) |
					  (bufidx << 12),
					  channel->endpoint->registers +
					  fpga_buf_ctrl_reg);
			}

			if (rc) {
				mutex_unlock(&channel->wr_mutex);
				return rc;
			}
		}

		/* This includes a zero-count return = EOF */
		if ((bytes_done >= count) || reached_eof)
			break;

		if (!exhausted)
			continue; /* More in RAM buffer(s)? Just go on. */

		if ((bytes_done > 0) &&
		    (no_time_left ||
		     (channel->wr_synchronous && channel->wr_allow_partial)))
			break;

		/*
		 * Nonblocking read: The "ready" flag tells us that the FPGA
		 * has data to send. In non-blocking mode, if it isn't on,
		 * just return. But if there is, we jump directly to the point
		 * where we ask for the FPGA to send all it has, and wait
		 * until that data arrives. So in a sense, we *do* block in
		 * nonblocking mode, but only for a very short time.
		 */

		if (!no_time_left && (filp->f_flags & O_NONBLOCK)) {
			if (bytes_done > 0)
				break;

			if (ready)
				goto desperate;

			rc = -EAGAIN;
			break;
		}

		if (!no_time_left || (bytes_done > 0)) {
			/*
			 * Note that in case of an element-misaligned read
			 * request, offsetlimit will include the last element,
			 * which will be partially read from.
			 */
			int offsetlimit = ((count - bytes_done) - 1) >>
				channel->log2_element_size;
			int buf_elements = channel->wr_buf_size >>
				channel->log2_element_size;

			/*
			 * In synchronous mode, always send an offset limit.
			 * Just don't send a value too big.
			 */
			if (channel->wr_synchronous) {
				/* Don't request more than one buffer */
				if (channel->wr_allow_partial &&
				    (offsetlimit >= buf_elements))
					offsetlimit = buf_elements - 1;

				/* Don't request more than all buffers */
				if (!channel->wr_allow_partial &&
				    (offsetlimit >=
				     (buf_elements * channel->num_wr_buffers)))
					offsetlimit = buf_elements *
						channel->num_wr_buffers - 1;
			}

			/*
			 * In asynchronous mode, force an early flush of a
			 * buffer only if that will allow returning a full
			 * count. The "<" rather than "<=" excludes requesting
			 * a full buffer, which would obviously cause a buffer
			 * transmission anyhow.
			 */

			if (channel->wr_synchronous ||
			    (offsetlimit < (buf_elements - 1))) {
				mutex_lock(&channel->endpoint->register_mutex);

				iowrite32(offsetlimit,
					  channel->endpoint->registers +
					  fpga_buf_offset_reg);

				iowrite32(1 | (channel->chan_num << 1) |
					  (2 << 24) |  /* 2 = offset limit */
					  (waiting_bufidx << 12),
					  channel->endpoint->registers +
					  fpga_buf_ctrl_reg);

				mutex_unlock(&channel->endpoint->
					     register_mutex);
			}
		}

		/*
		 * If partial completion is disallowed, there is no point in
		 * timeout sleeping. Neither if no_time_left is set and
		 * there's no data.
		 */
		if (!channel->wr_allow_partial ||
		    (no_time_left && (bytes_done == 0))) {
			/*
			 * This do-loop will run more than once if another
			 * thread reasserted wr_sleepy before we got the mutex
			 * back, so we try again.
			 */

			do {
				mutex_unlock(&channel->wr_mutex);

				if (wait_event_interruptible(
					    channel->wr_wait,
					    (!channel->wr_sleepy)))
					goto interrupted;

				if (mutex_lock_interruptible(
					    &channel->wr_mutex))
					goto interrupted;
			} while (channel->wr_sleepy);

			continue;

interrupted: /* Mutex is not held if got here */
			if (channel->endpoint->fatal_error)
				return -EIO;
			if (bytes_done)
				return bytes_done;
			if (filp->f_flags & O_NONBLOCK)
				return -EAGAIN; /* Don't admit snoozing */
			return -EINTR;
		}

		left_to_sleep = deadline - ((long) jiffies);

		/*
		 * If our time is out, skip the waiting. We may miss wr_sleepy
		 * being deasserted but hey, almost missing the train is like
		 * missing it.
		 */

		if (left_to_sleep > 0) {
			left_to_sleep =
				wait_event_interruptible_timeout(
					channel->wr_wait,
					(!channel->wr_sleepy),
					left_to_sleep);

			if (left_to_sleep > 0) /* wr_sleepy deasserted */
				continue;

			if (left_to_sleep < 0) { /* Interrupt */
				mutex_unlock(&channel->wr_mutex);
				if (channel->endpoint->fatal_error)
					return -EIO;
				if (bytes_done)
					return bytes_done;
				return -EINTR;
			}
		}

desperate:
		no_time_left = 1; /* We're out of sleeping time. Desperate! */

		if (bytes_done == 0) {
			/*
			 * Reaching here means that we allow partial return,
			 * that we've run out of time, and that we have
			 * nothing to return.
			 * So tell the FPGA to send anything it has or gets.
			 */

			iowrite32(1 | (channel->chan_num << 1) |
				  (3 << 24) |  /* Opcode 3, flush it all! */
				  (waiting_bufidx << 12),
				  channel->endpoint->registers +
				  fpga_buf_ctrl_reg);
		}

		/*
		 * Reaching here means that we *do* have data in the buffer,
		 * but the "partial" flag disallows returning less than
		 * requested. And we don't have as much. So loop again,
		 * which most likely means sleeping.
		 */
	}

	mutex_unlock(&channel->wr_mutex);

	if (channel->endpoint->fatal_error)
		return -EIO;

	if (rc)
		return rc;

	return bytes_done;
}
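
/*
 * xillybus_myflush() pushes a partially filled host buffer to the
 * FPGA. The timeout argument selects the behavior:
 *  > 0: flush, and wait up to that many jiffies for completion
 * == 0: flush, and wait indefinitely for the flush to complete
 *  < 0: autoflush: flush only if a single buffer is occupied
 */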
static int xillybus_myflush(struct xilly_channel *channel, long timeout)
{
	int rc;
	unsigned long flags;

	int end_offset_plus1;
	int bufidx, bufidx_minus1;
	int i;
	int empty;
	int new_rd_host_buf_pos;

	if (channel->endpoint->fatal_error)
		return -EIO;
	rc = mutex_lock_interruptible(&channel->rd_mutex);
	if (rc)
		return rc;

	/*
	 * Don't flush a closed channel. This can happen when the autoflush
	 * work item fires off after the file has closed. Not a problem as
	 * such, so just bail out quietly.
	 */

	if (!channel->rd_ref_count)
		goto done;

	bufidx = channel->rd_host_buf_idx;

	bufidx_minus1 = (bufidx == 0) ?
		channel->num_rd_buffers - 1 :
		bufidx - 1;

	end_offset_plus1 = channel->rd_host_buf_pos >>
		channel->log2_element_size;

	new_rd_host_buf_pos = channel->rd_host_buf_pos -
		(end_offset_plus1 << channel->log2_element_size);

	/* Submit the current buffer if it's nonempty */
	if (end_offset_plus1) {
		unsigned char *tail = channel->rd_buffers[bufidx]->addr +
			(end_offset_plus1 << channel->log2_element_size);

		/* Copy unflushed data, so we can put it in next buffer */
		for (i = 0; i < new_rd_host_buf_pos; i++)
			channel->rd_leftovers[i] = *tail++;

		spin_lock_irqsave(&channel->rd_spinlock, flags);

		/* Autoflush only if a single buffer is occupied */

		if ((timeout < 0) &&
		    (channel->rd_full ||
		     (bufidx_minus1 != channel->rd_fpga_buf_idx))) {
			spin_unlock_irqrestore(&channel->rd_spinlock, flags);
			/*
			 * A new work item may be queued by the ISR exactly
			 * now, since the execution of a work item allows the
			 * queuing of a new one while it's running.
			 */
			goto done;
		}

		/* The 4th element is never needed for data, so it's a flag */
		channel->rd_leftovers[3] = (new_rd_host_buf_pos != 0);

		/* Set up rd_full to reflect a certain moment's state */

		if (bufidx == channel->rd_fpga_buf_idx)
			channel->rd_full = 1;
		spin_unlock_irqrestore(&channel->rd_spinlock, flags);

		if (bufidx >= (channel->num_rd_buffers - 1))
			channel->rd_host_buf_idx = 0;
		else
			channel->rd_host_buf_idx++;

		dma_sync_single_for_device(channel->endpoint->dev,
					   channel->rd_buffers[bufidx]->dma_addr,
					   channel->rd_buf_size,
					   DMA_TO_DEVICE);

		mutex_lock(&channel->endpoint->register_mutex);

		iowrite32(end_offset_plus1 - 1,
			  channel->endpoint->registers + fpga_buf_offset_reg);

		iowrite32((channel->chan_num << 1) | /* Channel ID */
			  (2 << 24) |  /* Opcode 2, submit buffer */
			  (bufidx << 12),
			  channel->endpoint->registers + fpga_buf_ctrl_reg);

		mutex_unlock(&channel->endpoint->register_mutex);
	} else if (bufidx == 0) {
		bufidx = channel->num_rd_buffers - 1;
	} else {
		bufidx--;
	}

	channel->rd_host_buf_pos = new_rd_host_buf_pos;

	if (timeout < 0)
		goto done; /* Autoflush */

	/*
	 * bufidx is now the last buffer written to (or equal to
	 * rd_fpga_buf_idx if buffer was never written to), and
	 * channel->rd_host_buf_idx the one after it.
	 *
	 * If bufidx == channel->rd_fpga_buf_idx we're either empty or full.
	 */

	while (1) { /* Loop waiting for draining of buffers */
		spin_lock_irqsave(&channel->rd_spinlock, flags);

		if (bufidx != channel->rd_fpga_buf_idx)
			channel->rd_full = 1; /*
					       * Not really full,
					       * but needs waiting.
					       */

		empty = !channel->rd_full;

		spin_unlock_irqrestore(&channel->rd_spinlock, flags);

		if (empty)
			break;

		/*
		 * Indefinite sleep with mutex taken. With data waiting for
		 * flushing, the user should not be surprised if open() for
		 * write sleeps.
		 */
		if (timeout == 0)
			wait_event_interruptible(channel->rd_wait,
						 (!channel->rd_full));

		else if (wait_event_interruptible_timeout(
				 channel->rd_wait,
				 (!channel->rd_full),
				 timeout) == 0) {
			dev_warn(channel->endpoint->dev,
				 "Timed out while flushing. Output data may be lost.\n");

			rc = -ETIMEDOUT;
			break;
		}

		if (channel->rd_full) {
			rc = -EINTR;
			break;
		}
	}

done:
	mutex_unlock(&channel->rd_mutex);

	if (channel->endpoint->fatal_error)
		return -EIO;

	return rc;
}

static int xillybus_flush(struct file *filp, fl_owner_t id)
{
	if (!(filp->f_mode & FMODE_WRITE))
		return 0;

	return xillybus_myflush(filp->private_data, HZ); /* 1 second timeout */
}

static void xillybus_autoflush(struct work_struct *work)
{
	struct delayed_work *workitem = container_of(
		work, struct delayed_work, work);
	struct xilly_channel *channel = container_of(
		workitem, struct xilly_channel, rd_workitem);
	int rc;

	rc = xillybus_myflush(channel, -1);
	if (rc == -EINTR)
		dev_warn(channel->endpoint->dev,
			 "Autoflush failed because work queue thread got a signal.\n");
	else if (rc)
		dev_err(channel->endpoint->dev,
			"Autoflush failed under weird circumstances.\n");
}

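/*
 * xillybus_write() mirrors xillybus_read(): userspace data is copied
 * into the channel's DMA buffers, and full buffers are handed to the
 * FPGA. Bytes that don't fill a whole element are parked in
 * channel->rd_leftovers and written back at the head of the next
 * buffer.
 */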
static ssize_t xillybus_write(struct file *filp, const char __user *userbuf,
			      size_t count, loff_t *f_pos)
{
	ssize_t rc;
	unsigned long flags;
	int bytes_done = 0;
	struct xilly_channel *channel = filp->private_data;

	int full, exhausted;
	/* Initializations are there only to silence warnings */

	int howmany = 0, bufpos = 0, bufidx = 0, bufferdone = 0;
	int end_offset_plus1 = 0;

	if (channel->endpoint->fatal_error)
		return -EIO;

	rc = mutex_lock_interruptible(&channel->rd_mutex);
	if (rc)
		return rc;

	while (1) {
		int bytes_to_do = count - bytes_done;

		spin_lock_irqsave(&channel->rd_spinlock, flags);

		full = channel->rd_full;

		if (!full) {
			bufidx = channel->rd_host_buf_idx;
			bufpos = channel->rd_host_buf_pos;
			howmany = channel->rd_buf_size - bufpos;

			/*
			 * Update rd_host_* to its state after this operation.
			 * count=0 means committing the buffer immediately,
			 * which is like flushing, but not necessarily block.
			 */

			if ((howmany > bytes_to_do) &&
			    (count ||
			     ((bufpos >> channel->log2_element_size) == 0))) {
				bufferdone = 0;

				howmany = bytes_to_do;
				channel->rd_host_buf_pos += howmany;
			} else {
				bufferdone = 1;

				if (count) {
					end_offset_plus1 =
						channel->rd_buf_size >>
						channel->log2_element_size;
					channel->rd_host_buf_pos = 0;
				} else {
					unsigned char *tail;
					int i;

					howmany = 0;

					end_offset_plus1 = bufpos >>
						channel->log2_element_size;

					channel->rd_host_buf_pos -=
						end_offset_plus1 <<
						channel->log2_element_size;

					tail = channel->
						rd_buffers[bufidx]->addr +
						(end_offset_plus1 <<
						 channel->log2_element_size);

					for (i = 0;
					     i < channel->rd_host_buf_pos;
					     i++)
						channel->rd_leftovers[i] =
							*tail++;
				}

				if (bufidx == channel->rd_fpga_buf_idx)
					channel->rd_full = 1;

				if (bufidx >= (channel->num_rd_buffers - 1))
					channel->rd_host_buf_idx = 0;
				else
					channel->rd_host_buf_idx++;
			}
		}

		/*
		 * Marking our situation after the possible changes above,
		 * for use after releasing the spinlock.
		 *
		 * full = full before change
		 * exhausted = full after possible change
		 */

		exhausted = channel->rd_full;

		spin_unlock_irqrestore(&channel->rd_spinlock, flags);

		if (!full) { /* Go on, now without the spinlock */
			unsigned char *head =
				channel->rd_buffers[bufidx]->addr;
			int i;

			if ((bufpos == 0) || /* Zero means it's virgin */
			    (channel->rd_leftovers[3] != 0)) {
				dma_sync_single_for_cpu(channel->endpoint->dev,
							channel->rd_buffers[bufidx]->dma_addr,
							channel->rd_buf_size,
							DMA_TO_DEVICE);

				/* Virgin, but leftovers are due */
				for (i = 0; i < bufpos; i++)
					*head++ = channel->rd_leftovers[i];

				channel->rd_leftovers[3] = 0; /* Clear flag */
			}

			if (copy_from_user(
				    channel->rd_buffers[bufidx]->addr + bufpos,
				    userbuf, howmany))
				rc = -EFAULT;

			userbuf += howmany;
			bytes_done += howmany;

			if (bufferdone) {
				dma_sync_single_for_device(channel->endpoint->dev,
							   channel->rd_buffers[bufidx]->dma_addr,
							   channel->rd_buf_size,
							   DMA_TO_DEVICE);

				mutex_lock(&channel->endpoint->register_mutex);

				iowrite32(end_offset_plus1 - 1,
					  channel->endpoint->registers +
					  fpga_buf_offset_reg);

				iowrite32((channel->chan_num << 1) |
					  (2 << 24) |  /* 2 = submit buffer */
					  (bufidx << 12),
					  channel->endpoint->registers +
					  fpga_buf_ctrl_reg);

				mutex_unlock(&channel->endpoint->
					     register_mutex);

				channel->rd_leftovers[3] =
					(channel->rd_host_buf_pos != 0);
			}

			if (rc) {
				mutex_unlock(&channel->rd_mutex);

				if (channel->endpoint->fatal_error)
					return -EIO;

				if (!channel->rd_synchronous)
					queue_delayed_work(
						xillybus_wq,
						&channel->rd_workitem,
						XILLY_RX_TIMEOUT);

				return rc;
			}
		}

		if (bytes_done >= count)
			break;

		if (!exhausted)
			continue; /* If there's more space, just go on */

		if ((bytes_done > 0) && channel->rd_allow_partial)
			break;

		/*
		 * Indefinite sleep with mutex taken. With data waiting for
		 * flushing, the user should not be surprised if open() for
		 * write sleeps.
		 */

		if (filp->f_flags & O_NONBLOCK) {
			rc = -EAGAIN;
			break;
		}

		if (wait_event_interruptible(channel->rd_wait,
					     (!channel->rd_full))) {
			mutex_unlock(&channel->rd_mutex);

			if (channel->endpoint->fatal_error)
				return -EIO;

			if (bytes_done)
				return bytes_done;
			return -EINTR;
		}
	}

	mutex_unlock(&channel->rd_mutex);

	if (!channel->rd_synchronous)
		queue_delayed_work(xillybus_wq,
				   &channel->rd_workitem,
				   XILLY_RX_TIMEOUT);

	if (channel->endpoint->fatal_error)
		return -EIO;

	if (rc)
		return rc;

	if ((channel->rd_synchronous) && (bytes_done > 0)) {
		rc = xillybus_myflush(filp->private_data, 0); /* No timeout */

		if (rc && (rc != -EINTR))
			return rc;
	}

	return bytes_done;
}

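/*
 * xillybus_open() looks up the channel by inode, rejects access modes
 * that the channel's attributes don't allow, and on the first open in
 * each direction resets the channel state and sends opcode 4 to the
 * FPGA.
 */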
static int xillybus_open(struct inode *inode, struct file *filp)
{
	int rc;
	unsigned long flags;
	struct xilly_endpoint *endpoint;
	struct xilly_channel *channel;
	int index;

	rc = xillybus_find_inode(inode, (void **)&endpoint, &index);
	if (rc)
		return rc;

	if (endpoint->fatal_error)
		return -EIO;

	channel = endpoint->channels[1 + index];
	filp->private_data = channel;

	/*
	 * It gets complicated because:
	 * 1. We don't want to take a mutex we don't have to
	 * 2. We don't want to open one direction if the other will fail.
	 */

	if ((filp->f_mode & FMODE_READ) && (!channel->num_wr_buffers))
		return -ENODEV;

	if ((filp->f_mode & FMODE_WRITE) && (!channel->num_rd_buffers))
		return -ENODEV;

	if ((filp->f_mode & FMODE_READ) && (filp->f_flags & O_NONBLOCK) &&
	    (channel->wr_synchronous || !channel->wr_allow_partial ||
	     !channel->wr_supports_nonempty)) {
		dev_err(endpoint->dev,
			"open() failed: O_NONBLOCK not allowed for read on this device\n");
		return -ENODEV;
	}

	if ((filp->f_mode & FMODE_WRITE) && (filp->f_flags & O_NONBLOCK) &&
	    (channel->rd_synchronous || !channel->rd_allow_partial)) {
		dev_err(endpoint->dev,
			"open() failed: O_NONBLOCK not allowed for write on this device\n");
		return -ENODEV;
	}

	/*
	 * Note: open() may block on getting mutexes despite O_NONBLOCK.
	 * This shouldn't occur normally, since multiple open of the same
	 * file descriptor is almost always prohibited anyhow
	 * (*_exclusive_open is normally set in real-life systems).
	 */

	if (filp->f_mode & FMODE_READ) {
		rc = mutex_lock_interruptible(&channel->wr_mutex);
		if (rc)
			return rc;
	}

	if (filp->f_mode & FMODE_WRITE) {
		rc = mutex_lock_interruptible(&channel->rd_mutex);
		if (rc)
			goto unlock_wr;
	}

	if ((filp->f_mode & FMODE_READ) &&
	    (channel->wr_ref_count != 0) &&
	    (channel->wr_exclusive_open)) {
		rc = -EBUSY;
		goto unlock;
	}

	if ((filp->f_mode & FMODE_WRITE) &&
	    (channel->rd_ref_count != 0) &&
	    (channel->rd_exclusive_open)) {
		rc = -EBUSY;
		goto unlock;
	}

	if (filp->f_mode & FMODE_READ) {
		if (channel->wr_ref_count == 0) { /* First open of file */
			/* Move the host to first buffer */
			spin_lock_irqsave(&channel->wr_spinlock, flags);
			channel->wr_host_buf_idx = 0;
			channel->wr_host_buf_pos = 0;
			channel->wr_fpga_buf_idx = -1;
			channel->wr_empty = 1;
			channel->wr_ready = 0;
			channel->wr_sleepy = 1;
			channel->wr_eof = -1;
			channel->wr_hangup = 0;

			spin_unlock_irqrestore(&channel->wr_spinlock, flags);

			iowrite32(1 | (channel->chan_num << 1) |
				  (4 << 24) |  /* Opcode 4, open channel */
				  ((channel->wr_synchronous & 1) << 23),
				  channel->endpoint->registers +
				  fpga_buf_ctrl_reg);
		}

		channel->wr_ref_count++;
	}

	if (filp->f_mode & FMODE_WRITE) {
		if (channel->rd_ref_count == 0) { /* First open of file */
			/* Move the host to first buffer */
			spin_lock_irqsave(&channel->rd_spinlock, flags);
			channel->rd_host_buf_idx = 0;
			channel->rd_host_buf_pos = 0;
			channel->rd_leftovers[3] = 0; /* No leftovers. */
			channel->rd_fpga_buf_idx = channel->num_rd_buffers - 1;
			channel->rd_full = 0;

			spin_unlock_irqrestore(&channel->rd_spinlock, flags);

			iowrite32((channel->chan_num << 1) |
				  (4 << 24),  /* Opcode 4, open channel */
				  channel->endpoint->registers +
				  fpga_buf_ctrl_reg);
		}

		channel->rd_ref_count++;
	}

unlock:
	if (filp->f_mode & FMODE_WRITE)
		mutex_unlock(&channel->rd_mutex);
unlock_wr:
	if (filp->f_mode & FMODE_READ)
		mutex_unlock(&channel->wr_mutex);

	if (!rc && (!channel->seekable))
		return nonseekable_open(inode, filp);

	return rc;
}

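/*
 * xillybus_release() sends opcode 5 to the FPGA on the last close in
 * each direction. For the read direction it then waits for the FPGA
 * to hand back buffers up to the EOF position, so leftover data
 * doesn't spill into the next open.
 */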
static int xillybus_release(struct inode *inode, struct file *filp)
{
	unsigned long flags;
	struct xilly_channel *channel = filp->private_data;

	int buf_idx;
	int eof;

	if (channel->endpoint->fatal_error)
		return -EIO;

	if (filp->f_mode & FMODE_WRITE) {
		mutex_lock(&channel->rd_mutex);

		channel->rd_ref_count--;

		if (channel->rd_ref_count == 0) {
			/*
			 * We rely on the kernel calling flush()
			 * before we get here.
			 */

			iowrite32((channel->chan_num << 1) | /* Channel ID */
				  (5 << 24),  /* Opcode 5, close channel */
				  channel->endpoint->registers +
				  fpga_buf_ctrl_reg);
		}
		mutex_unlock(&channel->rd_mutex);
	}

	if (filp->f_mode & FMODE_READ) {
		mutex_lock(&channel->wr_mutex);

		channel->wr_ref_count--;

		if (channel->wr_ref_count == 0) {
			iowrite32(1 | (channel->chan_num << 1) |
				  (5 << 24),  /* Opcode 5, close channel */
				  channel->endpoint->registers +
				  fpga_buf_ctrl_reg);

			/*
			 * This is crazily cautious: We make sure that not
			 * only did we get an EOF (be it because we closed
			 * the channel or because of a user's EOF), but also
			 * that it's one beyond the last buffer arrived, so
			 * we have no leftover buffers pending before wrapping
			 * up (which can only happen in asynchronous channels,
			 * BTW).
			 */

			while (1) {
				spin_lock_irqsave(&channel->wr_spinlock,
						  flags);
				buf_idx = channel->wr_fpga_buf_idx;
				eof = channel->wr_eof;
				channel->wr_sleepy = 1;
				spin_unlock_irqrestore(&channel->wr_spinlock,
						       flags);

				/*
				 * Check if eof points at the buffer after
				 * the last one the FPGA submitted. Note that
				 * no EOF is marked by negative eof.
				 */

				buf_idx++;
				if (buf_idx == channel->num_wr_buffers)
					buf_idx = 0;

				if (buf_idx == eof)
					break;

				/*
				 * A signal here doesn't abort immediately:
				 * sleep 100 ms to give the FPGA a last
				 * chance to respond before giving up.
				 */

				if (wait_event_interruptible(
					    channel->wr_wait,
					    (!channel->wr_sleepy)))
					msleep(100);

				if (channel->wr_sleepy) {
					mutex_unlock(&channel->wr_mutex);
					dev_warn(channel->endpoint->dev,
						 "Hardware failed to respond to close command, therefore left in messy state.\n");
					return -EINTR;
				}
			}
		}

		mutex_unlock(&channel->wr_mutex);
	}

	return 0;
}

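/*
 * xillybus_llseek() is meaningful only on seekable channels: it sends
 * the new, element-aligned position to the FPGA (opcode 6) and clears
 * any pending write() leftovers.
 */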
static loff_t xillybus_llseek(struct file *filp, loff_t offset, int whence)
{
	struct xilly_channel *channel = filp->private_data;
	loff_t pos = filp->f_pos;
	int rc = 0;

	/*
	 * Take both mutexes not allowing interrupts, since it seems like
	 * common applications don't expect an -EINTR here. Besides, multiple
	 * access to a single file descriptor on seekable devices is a mess
	 * anyhow.
	 */

	if (channel->endpoint->fatal_error)
		return -EIO;

	mutex_lock(&channel->wr_mutex);
	mutex_lock(&channel->rd_mutex);

	switch (whence) {
	case SEEK_SET:
		pos = offset;
		break;
	case SEEK_CUR:
		pos += offset;
		break;
	case SEEK_END:
		pos = offset; /* Going to the end => to the beginning */
		break;
	default:
		rc = -EINVAL;
		goto end;
	}

	/* In any case, we must finish on an element boundary */
	if (pos & ((1 << channel->log2_element_size) - 1)) {
		rc = -EINVAL;
		goto end;
	}

	mutex_lock(&channel->endpoint->register_mutex);

	iowrite32(pos >> channel->log2_element_size,
		  channel->endpoint->registers + fpga_buf_offset_reg);

	iowrite32((channel->chan_num << 1) |
		  (6 << 24),  /* Opcode 6, set address */
		  channel->endpoint->registers + fpga_buf_ctrl_reg);

	mutex_unlock(&channel->endpoint->register_mutex);

end:
	mutex_unlock(&channel->rd_mutex);
	mutex_unlock(&channel->wr_mutex);

	if (rc) /* Return error after releasing mutexes */
		return rc;

	filp->f_pos = pos;

	/*
	 * Since seekable devices are allowed only when the channel is
	 * synchronous, we assume that there is no data pending in either
	 * direction (which holds true as long as no concurrent access on
	 * the file descriptor takes place).
	 * The only thing we may need to throw away is leftovers from a
	 * partial write() flush.
	 */

	channel->rd_leftovers[3] = 0;

	return pos;
}

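/*
 * xillybus_poll() reports EPOLLIN when FPGA-to-host data is waiting
 * (or EOF), EPOLLOUT when a host-to-FPGA buffer can accept data, and
 * EPOLLERR after a fatal error. Channels lacking the necessary
 * attributes simply never report readiness in that direction.
 */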
static __poll_t xillybus_poll(struct file *filp, poll_table *wait)
{
	struct xilly_channel *channel = filp->private_data;
	__poll_t mask = 0;
	unsigned long flags;

	poll_wait(filp, &channel->endpoint->ep_wait, wait);

	/*
	 * poll() won't play ball regarding read() channels which are
	 * synchronous. Allowing that would create situations where data
	 * has been delivered at the FPGA, and users expecting select()
	 * to wake up, which it may not. So make it never work.
	 */

	if (!channel->wr_synchronous && channel->wr_supports_nonempty) {
		poll_wait(filp, &channel->wr_wait, wait);
		poll_wait(filp, &channel->wr_ready_wait, wait);

		spin_lock_irqsave(&channel->wr_spinlock, flags);
		if (!channel->wr_empty || channel->wr_ready)
			mask |= EPOLLIN | EPOLLRDNORM;

		if (channel->wr_hangup)
			/*
			 * Not EPOLLHUP, because its behavior is in the
			 * mist, and EPOLLIN does what we want: Wake up
			 * the read file descriptor so it sees EOF.
			 */
			mask |= EPOLLIN | EPOLLRDNORM;
		spin_unlock_irqrestore(&channel->wr_spinlock, flags);
	}

	/*
	 * If partial data write is disallowed on a write() channel, it's
	 * pointless to ever signal OK to write, because it could block
	 * despite some space being available.
	 */

	if (channel->rd_allow_partial) {
		poll_wait(filp, &channel->rd_wait, wait);

		spin_lock_irqsave(&channel->rd_spinlock, flags);
		if (!channel->rd_full)
			mask |= EPOLLOUT | EPOLLWRNORM;
		spin_unlock_irqrestore(&channel->rd_spinlock, flags);
	}

	if (channel->endpoint->fatal_error)
		mask |= EPOLLERR;

	return mask;
}

static const struct file_operations xillybus_fops = {
	.owner = THIS_MODULE,
	.read = xillybus_read,
	.write = xillybus_write,
	.open = xillybus_open,
	.flush = xillybus_flush,
	.release = xillybus_release,
	.llseek = xillybus_llseek,
	.poll = xillybus_poll,
};

struct xilly_endpoint *xillybus_init_endpoint(struct device *dev)
{
	struct xilly_endpoint *endpoint;

	endpoint = devm_kzalloc(dev, sizeof(*endpoint), GFP_KERNEL);
	if (!endpoint)
		return NULL;

	endpoint->dev = dev;
	endpoint->msg_counter = 0x0b;
	endpoint->failed_messages = 0;
	endpoint->fatal_error = 0;

	init_waitqueue_head(&endpoint->ep_wait);
	mutex_init(&endpoint->register_mutex);

	return endpoint;
}
EXPORT_SYMBOL(xillybus_init_endpoint);

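/*
 * xilly_quiesce() asks the FPGA to stop DMA and waits for its
 * acknowledgment, which arrives as a QUIESCEACK message (setting
 * endpoint->idtlen to a non-negative value).
 */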
static int xilly_quiesce(struct xilly_endpoint *endpoint)
{
	long t;

	endpoint->idtlen = -1;

	iowrite32((u32) (endpoint->dma_using_dac & 0x0001),
		  endpoint->registers + fpga_dma_control_reg);

	t = wait_event_interruptible_timeout(endpoint->ep_wait,
					     (endpoint->idtlen >= 0),
					     XILLY_TIMEOUT);
	if (t <= 0) {
		dev_err(endpoint->dev,
			"Failed to quiesce the device on exit.\n");
		return -ENODEV;
	}
	return 0;
}

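/*
 * xillybus_endpoint_discovery() bootstraps the endpoint: it sets up a
 * bogus IDT describing just the message buffer and the IDT-receiving
 * channel, fetches and parses the real IDT, and then allocates the
 * actual channels and device files that the IDT describes.
 */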
int xillybus_endpoint_discovery(struct xilly_endpoint *endpoint)
{
	int rc;
	long t;

	void *bootstrap_resources;
	int idtbuffersize = (1 << PAGE_SHIFT);
	struct device *dev = endpoint->dev;

	/*
	 * The bogus IDT is used during bootstrap for allocating the initial
	 * message buffer, and then the message buffer and space for the IDT
	 * itself. The initial message buffer is of a single page's size, but
	 * it's soon replaced with a more sensible one. And so does the IDT.
	 */
	unsigned char bogus_idt[8] = { 1, 224, (PAGE_SHIFT)-2, 0,
				       3, 192, PAGE_SHIFT, 0 };
	struct xilly_idt_handle idt_handle;

	/*
	 * Writing the value 0x00000001 to the Endianness register signals
	 * which endianness this processor is using, so the FPGA can swap
	 * words as necessary.
	 */

	iowrite32(1, endpoint->registers + fpga_endian_reg);

	/* Bootstrap phase I: Allocate temporary message buffer */

	bootstrap_resources = devres_open_group(dev, NULL, GFP_KERNEL);
	if (!bootstrap_resources)
		return -ENOMEM;

	endpoint->num_channels = 0;

	rc = xilly_setupchannels(endpoint, bogus_idt, 1);
	if (rc)
		return rc;

	/* Clear the message subsystem (and counter in particular) */
	iowrite32(0x04, endpoint->registers + fpga_msg_ctrl_reg);

	endpoint->idtlen = -1;

	/*
	 * Set DMA 32/64 bit mode, quiesce the device and get the IDT
	 * buffer size.
	 */
	iowrite32((u32) (endpoint->dma_using_dac & 0x0001),
		  endpoint->registers + fpga_dma_control_reg);

	t = wait_event_interruptible_timeout(endpoint->ep_wait,
					     (endpoint->idtlen >= 0),
					     XILLY_TIMEOUT);
	if (t <= 0) {
		dev_err(endpoint->dev, "No response from FPGA. Aborting.\n");
		return -ENODEV;
	}

	/* Enable DMA */
	iowrite32((u32) (0x0002 | (endpoint->dma_using_dac & 0x0001)),
		  endpoint->registers + fpga_dma_control_reg);

	/* Bootstrap phase II: Allocate buffer for IDT and obtain it */
	while (endpoint->idtlen >= idtbuffersize) {
		idtbuffersize *= 2;
		bogus_idt[6]++;
	}

	endpoint->num_channels = 1;

	rc = xilly_setupchannels(endpoint, bogus_idt, 2);
	if (rc)
		goto failed_idt;

	rc = xilly_obtain_idt(endpoint);
	if (rc)
		goto failed_idt;

	rc = xilly_scan_idt(endpoint, &idt_handle);
	if (rc)
		goto failed_idt;

	devres_close_group(dev, bootstrap_resources);

	/* Bootstrap phase III: Allocate buffers according to IDT */

	rc = xilly_setupchannels(endpoint,
				 idt_handle.chandesc,
				 idt_handle.entries);
	if (rc)
		goto failed_idt;

	rc = xillybus_init_chrdev(dev, &xillybus_fops,
				  endpoint->owner, endpoint,
				  idt_handle.names,
				  idt_handle.names_len,
				  endpoint->num_channels,
				  xillyname, false);

	if (rc)
		goto failed_idt;

	devres_release_group(dev, bootstrap_resources);

	return 0;

failed_idt:
	xilly_quiesce(endpoint);
	flush_workqueue(xillybus_wq);

	return rc;
}
EXPORT_SYMBOL(xillybus_endpoint_discovery);

void xillybus_endpoint_remove(struct xilly_endpoint *endpoint)
{
	xillybus_cleanup_chrdev(endpoint, endpoint->dev);

	xilly_quiesce(endpoint);

	/*
	 * Flushing is done upon endpoint release to prevent access to memory
	 * after it has been released.
	 */

	flush_workqueue(xillybus_wq);
}
EXPORT_SYMBOL(xillybus_endpoint_remove);

static int __init xillybus_init(void)
{
	xillybus_wq = alloc_workqueue(xillyname, 0, 0);
	if (!xillybus_wq)
		return -ENOMEM;

	return 0;
}

static void __exit xillybus_exit(void)
{
	/* flush_workqueue() was called for each endpoint released */
	destroy_workqueue(xillybus_wq);
}

module_init(xillybus_init);
module_exit(xillybus_exit);