0001
0002
0003
0004
0005
0006
0007
0008
0009
0010 #include <linux/kthread.h>
0011 #include <linux/slab.h>
0012
0013 #include "c67x00.h"
0014 #include "c67x00-hcd.h"
0015
0016
0017
0018
0019
/*
 * Control-transfer stage markers.  This driver reuses urb->interval as
 * the per-urb control-transfer state (see c67x00_add_ctrl_urb() and
 * c67x00_handle_successful_td()).
 */
#define SETUP_STAGE		0	/* SETUP token is next */
#define DATA_STAGE		1	/* DATA phase is next */
#define STATUS_STAGE		2	/* STATUS handshake is next */
0023
0024
0025
0026
0027
0028
/*
 * struct c67x00_ep_data - per-endpoint scheduling state, stored in
 * usb_host_endpoint->hcpriv.
 */
struct c67x00_ep_data {
	struct list_head queue;		/* queued urbs (c67x00_urb_priv.hep_node) */
	struct list_head node;		/* entry in c67x00->list[pipe type] */
	struct usb_host_endpoint *hep;	/* back pointer to the host endpoint */
	struct usb_device *dev;		/* referenced device (usb_get_dev) */
	u16 next_frame;			/* next frame this ep may be scheduled in */
};
0036
0037
0038
0039
0040
0041
/*
 * struct c67x00_td - transfer descriptor.
 *
 * The first part (ly_base_addr .. next_td_addr, CY_TD_SIZE bytes) mirrors
 * the on-chip TD layout: it is written to / read back from SIE memory as a
 * unit via c67x00_ll_write_mem_le16()/c67x00_ll_read_mem_le16() (see
 * c67x00_send_td() and c67x00_parse_td()), so its layout must not change.
 * The remaining fields are software bookkeeping only.
 */
struct c67x00_td {
	/* Hardware part */
	__le16 ly_base_addr;	/* data buffer address in SIE memory */
	__le16 port_length;	/* sie/port number and data length */
	u8 pid_ep;		/* packet ID and endpoint number */
	u8 dev_addr;		/* device address */
	u8 ctrl_reg;		/* control flags (ARM_EN, SEQ_SEL, ...) */
	u8 status;		/* completion status (TD_STATUSMASK_*) */
	u8 retry_cnt;		/* transfer type, active flag, retry count */
#define TT_OFFSET		2
#define TT_CONTROL		0
#define TT_ISOCHRONOUS		1
#define TT_BULK			2
#define TT_INTERRUPT		3
	u8 residue;		/* signed count of bytes not transferred */
	__le16 next_td_addr;	/* address of the next TD in SIE memory */

	/* Software part */
	struct list_head td_list;	/* entry in c67x00->td_list */
	u16 td_addr;			/* this TD's address in SIE memory */
	void *data;			/* CPU-side data buffer */
	struct urb *urb;		/* owning urb; NULL once released */
	unsigned long privdata;		/* stage marker or iso packet index */

	/*
	 * These are needed for handling the toggle bits:
	 * an urb can be dequeued while a td is in progress
	 * after checking the td, the toggle bit might need to
	 * be fixed; this requires some lookup in the urb
	 */
	struct c67x00_ep_data *ep_data;	/* owning endpoint data */
	unsigned int pipe;		/* copy of urb->pipe */
};
0072
/*
 * struct c67x00_urb_priv - per-urb private data, stored in urb->hcpriv.
 */
struct c67x00_urb_priv {
	struct list_head hep_node;	/* entry in ep_data->queue */
	struct urb *urb;		/* back pointer to the urb */
	int port;			/* root port index (0-based) */
	int cnt;			/* iso: next packet index to schedule */
	int status;			/* completion status handed to usbcore */
	struct c67x00_ep_data *ep_data;	/* endpoint this urb is queued on */
};
0081
#define td_udev(td)	((td)->ep_data->dev)

/* Size of the hardware-mapped part of struct c67x00_td, in bytes */
#define CY_TD_SIZE		12

/* pid_ep field layout: PID in the high nibble, endpoint in the low */
#define TD_PIDEP_OFFSET		0x04
#define TD_PIDEPMASK_PID	0xF0
#define TD_PIDEPMASK_EP		0x0F
/* port_length field layout: data length and sie/port number */
#define TD_PORTLENMASK_DL	0x03FF
#define TD_PORTLENMASK_PN	0xC000

/* status field bits */
#define TD_STATUS_OFFSET	0x07
#define TD_STATUSMASK_ACK	0x01
#define TD_STATUSMASK_ERR	0x02
#define TD_STATUSMASK_TMOUT	0x04
#define TD_STATUSMASK_SEQ	0x08
#define TD_STATUSMASK_SETUP	0x10
#define TD_STATUSMASK_OVF	0x20
#define TD_STATUSMASK_NAK	0x40
#define TD_STATUSMASK_STALL	0x80

/* status bits that count as a fatal transfer error */
#define TD_ERROR_MASK		(TD_STATUSMASK_ERR | TD_STATUSMASK_TMOUT | \
				 TD_STATUSMASK_STALL)

/* retry_cnt field layout: active flag, transfer type, retry count */
#define TD_RETRYCNT_OFFSET	0x08
#define TD_RETRYCNTMASK_ACT_FLG	0x10
#define TD_RETRYCNTMASK_TX_TYPE	0x0C
#define TD_RETRYCNTMASK_RTY_CNT	0x03

/* residue sign bit: set when more data arrived than requested */
#define TD_RESIDUE_OVERFLOW	0x80

#define TD_PID_IN		0x90

/* Residue is a signed byte: negative means overflow */
#define td_residue(td)		((__s8)(td->residue))
#define td_ly_base_addr(td)	(__le16_to_cpu((td)->ly_base_addr))
#define td_port_length(td)	(__le16_to_cpu((td)->port_length))
#define td_next_td_addr(td)	(__le16_to_cpu((td)->next_td_addr))

#define td_active(td)		((td)->retry_cnt & TD_RETRYCNTMASK_ACT_FLG)
#define td_length(td)		(td_port_length(td) & TD_PORTLENMASK_DL)

/* A zero status means the TD was never touched by the hardware */
#define td_sequence_ok(td)	(!td->status || \
				 (!(td->status & TD_STATUSMASK_SEQ) ==	\
				  !(td->ctrl_reg & SEQ_SEL)))

#define td_acked(td)		(!td->status || \
				 (td->status & TD_STATUSMASK_ACK))
#define td_actual_bytes(td)	(td_length(td) - td_residue(td))
0130
0131
0132
0133
0134
0135
/*
 * dbg_td - dump one transfer descriptor (and its data buffer) to the
 * debug log; @msg is printed as a header so callers can tag the dump.
 */
static void dbg_td(struct c67x00_hcd *c67x00, struct c67x00_td *td, char *msg)
{
	struct device *dev = c67x00_hcd_dev(c67x00);

	dev_dbg(dev, "### %s at 0x%04x\n", msg, td->td_addr);
	dev_dbg(dev, "urb:      0x%p\n", td->urb);
	dev_dbg(dev, "endpoint:   %4d\n", usb_pipeendpoint(td->pipe));
	dev_dbg(dev, "pipeout:    %4d\n", usb_pipeout(td->pipe));
	dev_dbg(dev, "ly_base_addr: 0x%04x\n", td_ly_base_addr(td));
	dev_dbg(dev, "port_length: 0x%04x\n", td_port_length(td));
	dev_dbg(dev, "pid_ep:     0x%02x\n", td->pid_ep);
	dev_dbg(dev, "dev_addr:   0x%02x\n", td->dev_addr);
	dev_dbg(dev, "ctrl_reg:   0x%02x\n", td->ctrl_reg);
	dev_dbg(dev, "status:     0x%02x\n", td->status);
	dev_dbg(dev, "retry_cnt:  0x%02x\n", td->retry_cnt);
	dev_dbg(dev, "residue:    0x%02x\n", td->residue);
	dev_dbg(dev, "next_td_addr: 0x%04x\n", td_next_td_addr(td));
	dev_dbg(dev, "data: %*ph\n", td_length(td), td->data);
}
0155
0156
0157
0158
/* Read the SIE's current frame number, masked to the valid frame range. */
static inline u16 c67x00_get_current_frame_number(struct c67x00_hcd *c67x00)
{
	return c67x00_ll_husb_get_frame(c67x00->sie) & HOST_FRAME_MASK;
}
0163
0164
0165
0166
0167
0168 static inline u16 frame_add(u16 a, u16 b)
0169 {
0170 return (a + b) & HOST_FRAME_MASK;
0171 }
0172
0173
0174
0175
0176 static inline int frame_after(u16 a, u16 b)
0177 {
0178 return ((HOST_FRAME_MASK + a - b) & HOST_FRAME_MASK) <
0179 (HOST_FRAME_MASK / 2);
0180 }
0181
0182
0183
0184
0185 static inline int frame_after_eq(u16 a, u16 b)
0186 {
0187 return ((HOST_FRAME_MASK + 1 + a - b) & HOST_FRAME_MASK) <
0188 (HOST_FRAME_MASK / 2);
0189 }
0190
0191
0192
0193
0194
0195
0196
0197
/*
 * c67x00_release_urb - remove an urb from the scheduler's bookkeeping.
 *
 * Drops the urb/iso counters, detaches the urb from any TDs still on the
 * active list, and frees the urb's private data.  The caller must hold
 * c67x00->lock and is responsible for usb_hcd_unlink_urb_from_ep() and
 * the actual giveback.
 */
static void c67x00_release_urb(struct c67x00_hcd *c67x00, struct urb *urb)
{
	struct c67x00_td *td;
	struct c67x00_urb_priv *urbp;

	BUG_ON(!urb);

	c67x00->urb_count--;

	if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
		c67x00->urb_iso_count--;
		/* Last iso urb gone: restore standard frame bandwidth. */
		if (c67x00->urb_iso_count == 0)
			c67x00->max_frame_bw = MAX_FRAME_BW_STD;
	}

	/*
	 * TDs for this urb may still be queued; clear their urb pointer
	 * so later TD processing ignores them instead of dereferencing
	 * a freed urb.
	 */
	list_for_each_entry(td, &c67x00->td_list, td_list)
		if (urb == td->urb)
			td->urb = NULL;

	urbp = urb->hcpriv;
	urb->hcpriv = NULL;
	list_del(&urbp->hep_node);
	kfree(urbp);
}
0227
0228
0229
0230 static struct c67x00_ep_data *
0231 c67x00_ep_data_alloc(struct c67x00_hcd *c67x00, struct urb *urb)
0232 {
0233 struct usb_host_endpoint *hep = urb->ep;
0234 struct c67x00_ep_data *ep_data;
0235 int type;
0236
0237 c67x00->current_frame = c67x00_get_current_frame_number(c67x00);
0238
0239
0240 if (hep->hcpriv) {
0241 ep_data = hep->hcpriv;
0242 if (frame_after(c67x00->current_frame, ep_data->next_frame))
0243 ep_data->next_frame =
0244 frame_add(c67x00->current_frame, 1);
0245 return hep->hcpriv;
0246 }
0247
0248
0249 ep_data = kzalloc(sizeof(*ep_data), GFP_ATOMIC);
0250 if (!ep_data)
0251 return NULL;
0252
0253 INIT_LIST_HEAD(&ep_data->queue);
0254 INIT_LIST_HEAD(&ep_data->node);
0255 ep_data->hep = hep;
0256
0257
0258
0259 ep_data->dev = usb_get_dev(urb->dev);
0260 hep->hcpriv = ep_data;
0261
0262
0263 ep_data->next_frame = frame_add(c67x00->current_frame, 1);
0264
0265
0266
0267 type = usb_pipetype(urb->pipe);
0268 if (list_empty(&ep_data->node)) {
0269 list_add(&ep_data->node, &c67x00->list[type]);
0270 } else {
0271 struct c67x00_ep_data *prev;
0272
0273 list_for_each_entry(prev, &c67x00->list[type], node) {
0274 if (prev->hep->desc.bEndpointAddress >
0275 hep->desc.bEndpointAddress) {
0276 list_add(&ep_data->node, prev->node.prev);
0277 break;
0278 }
0279 }
0280 }
0281
0282 return ep_data;
0283 }
0284
0285 static int c67x00_ep_data_free(struct usb_host_endpoint *hep)
0286 {
0287 struct c67x00_ep_data *ep_data = hep->hcpriv;
0288
0289 if (!ep_data)
0290 return 0;
0291
0292 if (!list_empty(&ep_data->queue))
0293 return -EBUSY;
0294
0295 usb_put_dev(ep_data->dev);
0296 list_del(&ep_data->queue);
0297 list_del(&ep_data->node);
0298
0299 kfree(ep_data);
0300 hep->hcpriv = NULL;
0301
0302 return 0;
0303 }
0304
/*
 * c67x00_endpoint_disable - hcd hook: wait until the endpoint's queue has
 * drained, then free its scheduling data.  usbcore guarantees no urbs are
 * pending; warn if that invariant does not hold.
 */
void c67x00_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
{
	struct c67x00_hcd *c67x00 = hcd_to_c67x00_hcd(hcd);
	unsigned long flags;

	if (!list_empty(&ep->urb_list))
		dev_warn(c67x00_hcd_dev(c67x00), "error: urb list not empty\n");

	spin_lock_irqsave(&c67x00->lock, flags);

	/* Loop while urbs are still queued on the endpoint (-EBUSY). */
	while (c67x00_ep_data_free(ep)) {
		/* Drop the lock so the scheduler can run and we can sleep. */
		spin_unlock_irqrestore(&c67x00->lock, flags);

		/*
		 * Kick the scheduler and wait (bounded to one second) for
		 * it to signal endpoint_disable after processing TDs.
		 */
		reinit_completion(&c67x00->endpoint_disable);
		c67x00_sched_kick(c67x00);
		wait_for_completion_timeout(&c67x00->endpoint_disable, 1 * HZ);

		spin_lock_irqsave(&c67x00->lock, flags);
	}

	spin_unlock_irqrestore(&c67x00->lock, flags);
}
0332
0333
0334
0335 static inline int get_root_port(struct usb_device *dev)
0336 {
0337 while (dev->parent->parent)
0338 dev = dev->parent;
0339 return dev->portnum;
0340 }
0341
/*
 * c67x00_urb_enqueue - hcd hook: queue an urb for this host controller.
 *
 * Allocates per-urb private data, links the urb to its endpoint, sets up
 * type-specific scheduling state and kicks the scheduler.  Returns 0 or a
 * negative errno (-ENOMEM, -ENODEV, or the usb_hcd_link_urb_to_ep() error).
 */
int c67x00_urb_enqueue(struct usb_hcd *hcd,
		       struct urb *urb, gfp_t mem_flags)
{
	int ret;
	unsigned long flags;
	struct c67x00_urb_priv *urbp;
	struct c67x00_hcd *c67x00 = hcd_to_c67x00_hcd(hcd);
	int port = get_root_port(urb->dev)-1;	/* 0-based root port */

	/* Allocate private data outside the lock; mem_flags may allow sleep. */
	urbp = kzalloc(sizeof(*urbp), mem_flags);
	if (!urbp) {
		ret = -ENOMEM;
		goto err_urbp;
	}

	spin_lock_irqsave(&c67x00->lock, flags);

	/* Make sure the controller is still running. */
	if (!HC_IS_RUNNING(hcd->state)) {
		ret = -ENODEV;
		goto err_not_linked;
	}

	ret = usb_hcd_link_urb_to_ep(hcd, urb);
	if (ret)
		goto err_not_linked;

	INIT_LIST_HEAD(&urbp->hep_node);
	urbp->urb = urb;
	urbp->port = port;

	urbp->ep_data = c67x00_ep_data_alloc(c67x00, urb);

	if (!urbp->ep_data) {
		ret = -ENOMEM;
		goto err_epdata;
	}

	/*
	 * TODO claim bandwidth with usb_claim_bandwidth?
	 * Allocate ISO memory?
	 */
	urb->hcpriv = urbp;

	urb->actual_length = 0;

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		/* urb->interval doubles as the control-stage state. */
		urb->interval = SETUP_STAGE;
		break;
	case PIPE_INTERRUPT:
		break;
	case PIPE_BULK:
		break;
	case PIPE_ISOCHRONOUS:
		/* First iso urb raises the per-frame bandwidth limit. */
		if (c67x00->urb_iso_count == 0)
			c67x00->max_frame_bw = MAX_FRAME_BW_ISO;
		c67x00->urb_iso_count++;

		if (list_empty(&urbp->ep_data->queue))
			urb->start_frame = urbp->ep_data->next_frame;
		else {
			/* Continue seamlessly after the queued urbs. */
			struct urb *last_urb;

			last_urb = list_entry(urbp->ep_data->queue.prev,
					      struct c67x00_urb_priv,
					      hep_node)->urb;
			urb->start_frame =
			    frame_add(last_urb->start_frame,
				      last_urb->number_of_packets *
				      last_urb->interval);
		}
		urbp->cnt = 0;
		break;
	}

	/* Queue the urb on its endpoint. */
	list_add_tail(&urbp->hep_node, &urbp->ep_data->queue);

	/* First urb overall: re-enable SOF/EOP interrupts. */
	if (!c67x00->urb_count++)
		c67x00_ll_hpi_enable_sofeop(c67x00->sie);

	c67x00_sched_kick(c67x00);
	spin_unlock_irqrestore(&c67x00->lock, flags);

	return 0;

err_epdata:
	usb_hcd_unlink_urb_from_ep(hcd, urb);
err_not_linked:
	spin_unlock_irqrestore(&c67x00->lock, flags);
	kfree(urbp);
err_urbp:

	return ret;
}
0440
/*
 * c67x00_urb_dequeue - hcd hook: cancel a previously queued urb.
 *
 * Returns 0 when the urb was given back, or the usb_hcd_check_unlink_urb()
 * error when the unlink request is invalid/stale.
 */
int c67x00_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct c67x00_hcd *c67x00 = hcd_to_c67x00_hcd(hcd);
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&c67x00->lock, flags);
	rc = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (rc)
		goto done;

	c67x00_release_urb(c67x00, urb);
	usb_hcd_unlink_urb_from_ep(hcd, urb);

	/*
	 * Giveback must run unlocked: the completion callback may
	 * re-enter the hcd (e.g. resubmit) and take c67x00->lock.
	 */
	spin_unlock(&c67x00->lock);
	usb_hcd_giveback_urb(hcd, urb, status);
	spin_lock(&c67x00->lock);

	spin_unlock_irqrestore(&c67x00->lock, flags);

	return 0;

done:
	spin_unlock_irqrestore(&c67x00->lock, flags);
	return rc;
}
0467
0468
0469
0470
0471
0472
/*
 * c67x00_giveback_urb - complete an urb and hand it back to usbcore.
 *
 * Called with c67x00->lock held; the lock is dropped around
 * usb_hcd_giveback_urb() because the completion callback may re-enter
 * the hcd.  A NULL @urb (already released) is silently ignored.
 */
static void
c67x00_giveback_urb(struct c67x00_hcd *c67x00, struct urb *urb, int status)
{
	struct c67x00_urb_priv *urbp;

	if (!urb)
		return;

	urbp = urb->hcpriv;
	urbp->status = status;

	list_del_init(&urbp->hep_node);

	c67x00_release_urb(c67x00, urb);
	usb_hcd_unlink_urb_from_ep(c67x00_hcd_to_hcd(c67x00), urb);
	spin_unlock(&c67x00->lock);
	usb_hcd_giveback_urb(c67x00_hcd_to_hcd(c67x00), urb, status);
	spin_lock(&c67x00->lock);
}
0492
0493
0494
/*
 * c67x00_claim_frame_bw - reserve bus time and SIE memory for a transfer
 * of @len bytes in the current frame.
 *
 * Returns 0 and updates the bandwidth accounting on success, -EMSGSIZE
 * when the frame's bit budget, TD space or buffer space is exhausted.
 * @periodic transfers are additionally capped by MAX_PERIODIC_BW().
 *
 * NOTE(review): the constants below appear to be worst-case USB bus-time
 * formulas (cf. USB 2.0 spec 5.11.3) scaled by 100, selected by speed,
 * direction and transfer type -- TODO confirm against the spec before
 * changing them.
 */
static int c67x00_claim_frame_bw(struct c67x00_hcd *c67x00, struct urb *urb,
				 int len, int periodic)
{
	struct c67x00_urb_priv *urbp = urb->hcpriv;
	int bit_time;

	if (urbp->ep_data->dev->speed == USB_SPEED_LOW) {
		/* Low-speed: per-direction cost in 0.01-bit units. */
		if (usb_pipein(urb->pipe))
			bit_time = 80240 + 7578*len;
		else
			bit_time = 80260 + 7467*len;
	} else {
		/* Full-speed: iso has no handshake overhead. */
		if (usb_pipeisoc(urb->pipe))
			bit_time = usb_pipein(urb->pipe) ? 9050 : 7840;
		else
			bit_time = 11250;
		bit_time += 936*len;
	}

	/* Scale back down (rounding) and add a fixed safety margin. */
	bit_time = ((bit_time+50) / 100) + 106;

	if (unlikely(bit_time + c67x00->bandwidth_allocated >=
		     c67x00->max_frame_bw))
		return -EMSGSIZE;

	/* Room for another TD in SIE memory? */
	if (unlikely(c67x00->next_td_addr + CY_TD_SIZE >=
		     c67x00->td_base_addr + SIE_TD_SIZE))
		return -EMSGSIZE;

	/* Room for the data buffer in SIE memory? */
	if (unlikely(c67x00->next_buf_addr + len >=
		     c67x00->buf_base_addr + SIE_TD_BUF_SIZE))
		return -EMSGSIZE;

	if (periodic) {
		if (unlikely(bit_time + c67x00->periodic_bw_allocated >=
			     MAX_PERIODIC_BW(c67x00->max_frame_bw)))
			return -EMSGSIZE;
		c67x00->periodic_bw_allocated += bit_time;
	}

	c67x00->bandwidth_allocated += bit_time;
	return 0;
}
0557
0558
0559
0560
0561
0562
/*
 * c67x00_create_td - build a transfer descriptor and queue it for the
 * current frame.
 *
 * @data/@len describe the packet buffer, @pid the token (USB_PID_*),
 * @toggle the data-toggle value, and @privdata a caller cookie (control
 * stage marker or iso packet index).  Claims frame bandwidth first;
 * returns 0, -EMSGSIZE (no bandwidth/space) or -ENOMEM.
 */
static int c67x00_create_td(struct c67x00_hcd *c67x00, struct urb *urb,
			    void *data, int len, int pid, int toggle,
			    unsigned long privdata)
{
	struct c67x00_td *td;
	struct c67x00_urb_priv *urbp = urb->hcpriv;
	const __u8 active_flag = 1, retry_cnt = 3;
	__u8 cmd = 0;
	int tt = 0;

	/* Iso and interrupt transfers count against the periodic budget. */
	if (c67x00_claim_frame_bw(c67x00, urb, len, usb_pipeisoc(urb->pipe)
				  || usb_pipeint(urb->pipe)))
		return -EMSGSIZE;	/* Not really an error, but so ... */

	td = kzalloc(sizeof(*td), GFP_ATOMIC);
	if (!td)
		return -ENOMEM;

	td->pipe = urb->pipe;
	td->ep_data = urbp->ep_data;

	/* Low-speed device behind this (full-speed) port needs a preamble. */
	if ((td_udev(td)->speed == USB_SPEED_LOW) &&
	    !(c67x00->low_speed_ports & (1 << urbp->port)))
		cmd |= PREAMBLE_EN;

	switch (usb_pipetype(td->pipe)) {
	case PIPE_ISOCHRONOUS:
		tt = TT_ISOCHRONOUS;
		cmd |= ISO_EN;
		break;
	case PIPE_CONTROL:
		tt = TT_CONTROL;
		break;
	case PIPE_BULK:
		tt = TT_BULK;
		break;
	case PIPE_INTERRUPT:
		tt = TT_INTERRUPT;
		break;
	}

	if (toggle)
		cmd |= SEQ_SEL;

	cmd |= ARM_EN;

	/* SW part */
	td->td_addr = c67x00->next_td_addr;
	c67x00->next_td_addr = c67x00->next_td_addr + CY_TD_SIZE;

	/* HW part: fields in the on-chip TD layout */
	td->ly_base_addr = __cpu_to_le16(c67x00->next_buf_addr);
	td->port_length = __cpu_to_le16((c67x00->sie->sie_num << 15) |
					(urbp->port << 14) | (len & 0x3FF));
	td->pid_ep = ((pid & 0xF) << TD_PIDEP_OFFSET) |
	    (usb_pipeendpoint(td->pipe) & 0xF);
	td->dev_addr = usb_pipedevice(td->pipe) & 0x7F;
	td->ctrl_reg = cmd;
	td->status = 0;
	td->retry_cnt = (tt << TT_OFFSET) | (active_flag << 4) | retry_cnt;
	td->residue = 0;
	td->next_td_addr = __cpu_to_le16(c67x00->next_td_addr);

	/* SW part */
	td->data = data;
	td->urb = urb;
	td->privdata = privdata;

	/* Buffer space is allocated in 16-bit (word-aligned) units. */
	c67x00->next_buf_addr += (len + 1) & ~0x01;

	list_add_tail(&td->td_list, &c67x00->td_list);
	return 0;
}
0636
/* Unlink a TD from the scheduler's list and free it. */
static inline void c67x00_release_td(struct c67x00_td *td)
{
	list_del_init(&td->td_list);
	kfree(td);
}
0642
0643
0644
/*
 * c67x00_add_data_urb - create the data-stage TDs for @urb in this frame.
 *
 * Splits the remaining transfer into max-packet-size TDs, alternating the
 * data toggle.  For OUT transfers with URB_ZERO_PACKET a trailing
 * zero-length TD is added when the length is a multiple of maxpacket.
 * Control pipes schedule only one data TD per frame (early break).
 * Returns 0 or the c67x00_create_td() error.
 */
static int c67x00_add_data_urb(struct c67x00_hcd *c67x00, struct urb *urb)
{
	int remaining;
	int toggle;
	int pid;
	int ret = 0;
	int maxps;
	int need_empty;

	toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			       usb_pipeout(urb->pipe));
	remaining = urb->transfer_buffer_length - urb->actual_length;

	maxps = usb_maxpacket(urb->dev, urb->pipe);

	need_empty = (urb->transfer_flags & URB_ZERO_PACKET) &&
	    usb_pipeout(urb->pipe) && !(remaining % maxps);

	while (remaining || need_empty) {
		int len;
		char *td_buf;

		len = (remaining > maxps) ? maxps : remaining;
		if (!len)
			/* The zero-length packet is this iteration. */
			need_empty = 0;

		pid = usb_pipeout(urb->pipe) ? USB_PID_OUT : USB_PID_IN;
		td_buf = urb->transfer_buffer + urb->transfer_buffer_length -
		    remaining;
		ret = c67x00_create_td(c67x00, urb, td_buf, len, pid, toggle,
				       DATA_STAGE);
		if (ret)
			return ret;	/* td wasn't created */

		toggle ^= 1;
		remaining -= len;
		if (usb_pipecontrol(urb->pipe))
			break;
	}

	return 0;
}
0687
0688
0689
0690
/*
 * c67x00_add_ctrl_urb - schedule the next stage of a control transfer.
 *
 * urb->interval holds the stage state (SETUP/DATA/STATUS_STAGE), set by
 * c67x00_urb_enqueue() and advanced by c67x00_handle_successful_td().
 * Returns 0 or the c67x00_create_td()/c67x00_add_data_urb() error.
 */
static int c67x00_add_ctrl_urb(struct c67x00_hcd *c67x00, struct urb *urb)
{
	int ret;
	int pid;

	switch (urb->interval) {
	default:
	case SETUP_STAGE:
		ret = c67x00_create_td(c67x00, urb, urb->setup_packet,
				       8, USB_PID_SETUP, 0, SETUP_STAGE);
		if (ret)
			return ret;
		urb->interval = SETUP_STAGE;
		/* Data stage always starts with DATA1. */
		usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			      usb_pipeout(urb->pipe), 1);
		break;
	case DATA_STAGE:
		if (urb->transfer_buffer_length) {
			ret = c67x00_add_data_urb(c67x00, urb);
			if (ret)
				return ret;
			break;
		}
		fallthrough;	/* no data stage: go straight to status */
	case STATUS_STAGE:
		/* Status stage uses the opposite direction, toggle DATA1. */
		pid = !usb_pipeout(urb->pipe) ? USB_PID_OUT : USB_PID_IN;
		ret = c67x00_create_td(c67x00, urb, NULL, 0, pid, 1,
				       STATUS_STAGE);
		if (ret)
			return ret;
		break;
	}

	return 0;
}
0726
0727
0728
0729
0730 static int c67x00_add_int_urb(struct c67x00_hcd *c67x00, struct urb *urb)
0731 {
0732 struct c67x00_urb_priv *urbp = urb->hcpriv;
0733
0734 if (frame_after_eq(c67x00->current_frame, urbp->ep_data->next_frame)) {
0735 urbp->ep_data->next_frame =
0736 frame_add(urbp->ep_data->next_frame, urb->interval);
0737 return c67x00_add_data_urb(c67x00, urb);
0738 }
0739 return 0;
0740 }
0741
/*
 * c67x00_add_iso_urb - schedule the next isochronous packet of @urb when
 * its frame slot (ep_data->next_frame) has arrived.
 *
 * A failed TD creation marks the packet as errored but keeps going; the
 * urb is given back after its final packet either way.  Always returns 0.
 */
static int c67x00_add_iso_urb(struct c67x00_hcd *c67x00, struct urb *urb)
{
	struct c67x00_urb_priv *urbp = urb->hcpriv;

	if (frame_after_eq(c67x00->current_frame, urbp->ep_data->next_frame)) {
		char *td_buf;
		int len, pid, ret;

		BUG_ON(urbp->cnt >= urb->number_of_packets);

		td_buf = urb->transfer_buffer +
		    urb->iso_frame_desc[urbp->cnt].offset;
		len = urb->iso_frame_desc[urbp->cnt].length;
		pid = usb_pipeout(urb->pipe) ? USB_PID_OUT : USB_PID_IN;

		/* privdata carries the packet index for completion. */
		ret = c67x00_create_td(c67x00, urb, td_buf, len, pid, 0,
				       urbp->cnt);
		if (ret) {
			/* Record the error and drop the packet. */
			dev_dbg(c67x00_hcd_dev(c67x00), "create failed: %d\n",
				ret);
			urb->iso_frame_desc[urbp->cnt].actual_length = 0;
			urb->iso_frame_desc[urbp->cnt].status = ret;
			if (urbp->cnt + 1 == urb->number_of_packets)
				c67x00_giveback_urb(c67x00, urb, 0);
		}

		urbp->ep_data->next_frame =
		    frame_add(urbp->ep_data->next_frame, urb->interval);
		urbp->cnt++;
	}
	return 0;
}
0774
0775
0776
0777 static void c67x00_fill_from_list(struct c67x00_hcd *c67x00, int type,
0778 int (*add)(struct c67x00_hcd *, struct urb *))
0779 {
0780 struct c67x00_ep_data *ep_data;
0781 struct urb *urb;
0782
0783
0784 list_for_each_entry(ep_data, &c67x00->list[type], node) {
0785 if (!list_empty(&ep_data->queue)) {
0786
0787
0788 urb = list_entry(ep_data->queue.next,
0789 struct c67x00_urb_priv,
0790 hep_node)->urb;
0791 add(c67x00, urb);
0792 }
0793 }
0794 }
0795
/*
 * c67x00_fill_frame - build the TD list for the upcoming frame.
 *
 * Resets the bandwidth and SIE-memory allocators, then schedules urbs in
 * priority order: iso, interrupt, control, bulk.  Any leftover TDs from
 * the previous frame indicate a logic error and are dumped and freed.
 */
static void c67x00_fill_frame(struct c67x00_hcd *c67x00)
{
	struct c67x00_td *td, *ttd;

	/* Check if we can proceed */
	if (!list_empty(&c67x00->td_list)) {
		dev_warn(c67x00_hcd_dev(c67x00),
			 "TD list not empty! This should not happen!\n");
		list_for_each_entry_safe(td, ttd, &c67x00->td_list, td_list) {
			dbg_td(c67x00, td, "Unprocessed td");
			c67x00_release_td(td);
		}
	}

	/* Reset the per-frame allocators. */
	c67x00->bandwidth_allocated = 0;
	c67x00->periodic_bw_allocated = 0;

	c67x00->next_td_addr = c67x00->td_base_addr;
	c67x00->next_buf_addr = c67x00->buf_base_addr;

	/* Fill the frame, periodic transfers first. */
	c67x00_fill_from_list(c67x00, PIPE_ISOCHRONOUS, c67x00_add_iso_urb);
	c67x00_fill_from_list(c67x00, PIPE_INTERRUPT, c67x00_add_int_urb);
	c67x00_fill_from_list(c67x00, PIPE_CONTROL, c67x00_add_ctrl_urb);
	c67x00_fill_from_list(c67x00, PIPE_BULK, c67x00_add_data_urb);
}
0823
0824
0825
0826
0827
0828
/*
 * c67x00_parse_td - read back a completed TD (and, for IN transfers, its
 * received data) from SIE memory into the CPU-side structures.
 */
static inline void
c67x00_parse_td(struct c67x00_hcd *c67x00, struct c67x00_td *td)
{
	c67x00_ll_read_mem_le16(c67x00->sie->dev,
				td->td_addr, td, CY_TD_SIZE);

	if (usb_pipein(td->pipe) && td_actual_bytes(td))
		c67x00_ll_read_mem_le16(c67x00->sie->dev, td_ly_base_addr(td),
					td->data, td_actual_bytes(td));
}
0839
0840 static int c67x00_td_to_error(struct c67x00_hcd *c67x00, struct c67x00_td *td)
0841 {
0842 if (td->status & TD_STATUSMASK_ERR) {
0843 dbg_td(c67x00, td, "ERROR_FLAG");
0844 return -EILSEQ;
0845 }
0846 if (td->status & TD_STATUSMASK_STALL) {
0847
0848 return -EPIPE;
0849 }
0850 if (td->status & TD_STATUSMASK_TMOUT) {
0851 dbg_td(c67x00, td, "TIMEOUT");
0852 return -ETIMEDOUT;
0853 }
0854
0855 return 0;
0856 }
0857
0858 static inline int c67x00_end_of_data(struct c67x00_td *td)
0859 {
0860 int maxps, need_empty, remaining;
0861 struct urb *urb = td->urb;
0862 int act_bytes;
0863
0864 act_bytes = td_actual_bytes(td);
0865
0866 if (unlikely(!act_bytes))
0867 return 1;
0868
0869 maxps = usb_maxpacket(td_udev(td), td->pipe);
0870
0871 if (unlikely(act_bytes < maxps))
0872 return 1;
0873
0874 remaining = urb->transfer_buffer_length - urb->actual_length;
0875 need_empty = (urb->transfer_flags & URB_ZERO_PACKET) &&
0876 usb_pipeout(urb->pipe) && !(remaining % maxps);
0877
0878 if (unlikely(!remaining && !need_empty))
0879 return 1;
0880
0881 return 0;
0882 }
0883
0884
0885
0886
0887
0888
/*
 * c67x00_clear_pipe - remove all TDs queued after @last_td that belong to
 * the same pipe, e.g. after an error or an early end-of-data.
 *
 * Walks forward from @last_td; @tmp remembers the last surviving node so
 * the walk can step back onto a valid element after releasing @td
 * (list_for_each_entry_safe can't express this "delete and rewind").
 */
static inline void c67x00_clear_pipe(struct c67x00_hcd *c67x00,
				     struct c67x00_td *last_td)
{
	struct c67x00_td *td, *tmp;
	td = last_td;
	tmp = last_td;
	while (td->td_list.next != &c67x00->td_list) {
		td = list_entry(td->td_list.next, struct c67x00_td, td_list);
		if (td->pipe == last_td->pipe) {
			c67x00_release_td(td);
			td = tmp;	/* rewind to the previous kept node */
		}
		tmp = td;
	}
}
0904
0905
0906
/*
 * c67x00_handle_successful_td - account a successfully completed TD and
 * advance/complete its urb.
 *
 * Control urbs step through the stage machine kept in urb->interval;
 * interrupt/bulk urbs complete when the data phase ends.  A TD whose urb
 * was already released (td->urb == NULL) is ignored.
 */
static void c67x00_handle_successful_td(struct c67x00_hcd *c67x00,
					struct c67x00_td *td)
{
	struct urb *urb = td->urb;

	if (!urb)
		return;

	urb->actual_length += td_actual_bytes(td);

	switch (usb_pipetype(td->pipe)) {
		/* isochronous tds are handled separately */
	case PIPE_CONTROL:
		switch (td->privdata) {
		case SETUP_STAGE:
			/* SETUP done: move on to DATA or STATUS. */
			urb->interval =
			    urb->transfer_buffer_length ?
			    DATA_STAGE : STATUS_STAGE;
			/* Don't count the SETUP packet's 8 bytes as data. */
			urb->actual_length = 0;
			break;

		case DATA_STAGE:
			if (c67x00_end_of_data(td)) {
				urb->interval = STATUS_STAGE;
				/* Drop any remaining data-stage TDs. */
				c67x00_clear_pipe(c67x00, td);
			}
			break;

		case STATUS_STAGE:
			urb->interval = 0;
			c67x00_giveback_urb(c67x00, urb, 0);
			break;
		}
		break;

	case PIPE_INTERRUPT:
	case PIPE_BULK:
		if (unlikely(c67x00_end_of_data(td))) {
			c67x00_clear_pipe(c67x00, td);
			c67x00_giveback_urb(c67x00, urb, 0);
		}
		break;
	}
}
0952
0953 static void c67x00_handle_isoc(struct c67x00_hcd *c67x00, struct c67x00_td *td)
0954 {
0955 struct urb *urb = td->urb;
0956 int cnt;
0957
0958 if (!urb)
0959 return;
0960
0961 cnt = td->privdata;
0962
0963 if (td->status & TD_ERROR_MASK)
0964 urb->error_count++;
0965
0966 urb->iso_frame_desc[cnt].actual_length = td_actual_bytes(td);
0967 urb->iso_frame_desc[cnt].status = c67x00_td_to_error(c67x00, td);
0968 if (cnt + 1 == urb->number_of_packets)
0969 c67x00_giveback_urb(c67x00, urb, 0);
0970 }
0971
0972
0973
0974
0975
0976
0977
/*
 * c67x00_check_td_list - process every TD of the just-finished frame.
 *
 * For each TD: read back its status, dispatch iso packets, complete urbs
 * with fatal errors, skip NAKed/unacked TDs, handle overflow, and account
 * successful ones.  On the way out the data toggle is updated and the TD
 * freed.  Caller holds c67x00->lock.
 */
static inline void c67x00_check_td_list(struct c67x00_hcd *c67x00)
{
	struct c67x00_td *td, *tmp;
	struct urb *urb;
	int ack_ok;
	int clear_endpoint;

	list_for_each_entry_safe(td, tmp, &c67x00->td_list, td_list) {
		/* Fetch the hardware's view of this TD. */
		c67x00_parse_td(c67x00, td);
		urb = td->urb;	/* urb can be NULL! */
		ack_ok = 0;
		clear_endpoint = 1;

		/* Handle isochronous transfers separately. */
		if (usb_pipeisoc(td->pipe)) {
			clear_endpoint = 0;
			c67x00_handle_isoc(c67x00, td);
			goto cont;
		}

		/*
		 * Fatal errors complete the urb immediately; the rest of
		 * the pipe's TDs are flushed via clear_endpoint below.
		 */
		if (td->status & TD_ERROR_MASK) {
			c67x00_giveback_urb(c67x00, urb,
					    c67x00_td_to_error(c67x00, td));
			goto cont;
		}

		/* NAK, not acked, or wrong toggle: retry in a later frame. */
		if ((td->status & TD_STATUSMASK_NAK) || !td_sequence_ok(td) ||
		    !td_acked(td))
			goto cont;

		/* The hardware acknowledged this TD. */
		ack_ok = 1;

		if (unlikely(td->status & TD_STATUSMASK_OVF)) {
			if (td_residue(td) & TD_RESIDUE_OVERFLOW) {
				/* Babble: more data received than requested. */
				c67x00_giveback_urb(c67x00, urb, -EOVERFLOW);
				goto cont;
			}
		}

		clear_endpoint = 0;
		c67x00_handle_successful_td(c67x00, td);

cont:
		if (clear_endpoint)
			c67x00_clear_pipe(c67x00, td);
		if (ack_ok)
			usb_settoggle(td_udev(td), usb_pipeendpoint(td->pipe),
				      usb_pipeout(td->pipe),
				      !(td->ctrl_reg & SEQ_SEL));
		/*
		 * Recompute tmp: c67x00_clear_pipe()/giveback may have
		 * freed the element list_for_each_entry_safe cached.
		 */
		tmp = list_entry(td->td_list.next, typeof(*td), td_list);
		c67x00_release_td(td);
	}
}
1038
1039
1040
/*
 * c67x00_all_tds_processed - true when the SIE has finished the TD chain.
 * The hardware clears its current-TD pointer when the whole list is done.
 */
static inline int c67x00_all_tds_processed(struct c67x00_hcd *c67x00)
{
	return !c67x00_ll_husb_get_current_td(c67x00->sie);
}
1048
1049
1050
1051
1052 static void c67x00_send_td(struct c67x00_hcd *c67x00, struct c67x00_td *td)
1053 {
1054 int len = td_length(td);
1055
1056 if (len && ((td->pid_ep & TD_PIDEPMASK_PID) != TD_PID_IN))
1057 c67x00_ll_write_mem_le16(c67x00->sie->dev, td_ly_base_addr(td),
1058 td->data, len);
1059
1060 c67x00_ll_write_mem_le16(c67x00->sie->dev,
1061 td->td_addr, td, CY_TD_SIZE);
1062 }
1063
/*
 * c67x00_send_frame - download the frame's TD chain to the SIE and start
 * execution.  The last TD's next_td_addr is zeroed to terminate the chain.
 */
static void c67x00_send_frame(struct c67x00_hcd *c67x00)
{
	struct c67x00_td *td;

	if (list_empty(&c67x00->td_list))
		dev_warn(c67x00_hcd_dev(c67x00),
			 "%s: td list should not be empty here!\n",
			 __func__);

	list_for_each_entry(td, &c67x00->td_list, td_list) {
		if (td->td_list.next == &c67x00->td_list)
			td->next_td_addr = 0;	/* Last td in list */

		c67x00_send_td(c67x00, td);
	}

	/* Point the hardware at the first TD and kick it off. */
	c67x00_ll_husb_set_current_td(c67x00->sie, c67x00->td_base_addr);
}
1082
1083
1084
1085
1086
1087
/*
 * c67x00_do_work - main scheduler loop body, run from the workqueue.
 *
 * When the hardware has finished the previous frame's TDs, process their
 * results, then (once per frame) build and send the next frame's TD list.
 * SOF/EOP interrupts are disabled again when no urbs remain.
 */
static void c67x00_do_work(struct c67x00_hcd *c67x00)
{
	spin_lock(&c67x00->lock);

	/* Make sure all tds are processed by the hardware first. */
	if (!c67x00_all_tds_processed(c67x00))
		goto out;

	c67x00_check_td_list(c67x00);

	/* no td's are being processed (current == 0)
	 * and all have been "checked" */
	complete(&c67x00->endpoint_disable);

	/* TDs may remain (e.g. control stages mid-flight); wait for them. */
	if (!list_empty(&c67x00->td_list))
		goto out;

	/* Schedule at most one new frame per hardware frame. */
	c67x00->current_frame = c67x00_get_current_frame_number(c67x00);
	if (c67x00->current_frame == c67x00->last_frame)
		goto out;	/* Don't send tds in same frame */
	c67x00->last_frame = c67x00->current_frame;

	/* If no urbs are scheduled, our work is done. */
	if (!c67x00->urb_count) {
		c67x00_ll_hpi_disable_sofeop(c67x00->sie);
		goto out;
	}

	c67x00_fill_frame(c67x00);
	if (!list_empty(&c67x00->td_list))
		/* TD's have been added to the frame */
		c67x00_send_frame(c67x00);

out:
	spin_unlock(&c67x00->lock);
}
1123
1124
1125
1126 static void c67x00_sched_work(struct work_struct *work)
1127 {
1128 struct c67x00_hcd *c67x00;
1129
1130 c67x00 = container_of(work, struct c67x00_hcd, work);
1131 c67x00_do_work(c67x00);
1132 }
1133
/* Schedule a scheduler pass on the high-priority system workqueue. */
void c67x00_sched_kick(struct c67x00_hcd *c67x00)
{
	queue_work(system_highpri_wq, &c67x00->work);
}
1138
/* Initialize the scheduler work item.  Always returns 0. */
int c67x00_sched_start_scheduler(struct c67x00_hcd *c67x00)
{
	INIT_WORK(&c67x00->work, c67x00_sched_work);
	return 0;
}
1144
/* Stop the scheduler, waiting for any in-flight work pass to finish. */
void c67x00_sched_stop_scheduler(struct c67x00_hcd *c67x00)
{
	cancel_work_sync(&c67x00->work);
}