/*
 * ISP1362 HCD (Host Controller Driver) for USB.
 */
0041 #undef ISP1362_DEBUG
0042
0043
0044
0045
0046
0047
0048
0049
0050
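/*
 * When defined, start_atl_transfers() defers further ATL submissions after
 * queuing a SETUP packet; apparently a workaround for a PXA2xx UDC quirk
 * seen with usbtest (see the #ifdef in start_atl_transfers()).
 */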
0051 #undef BUGGY_PXA2XX_UDC_USBTEST
0052
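/* Optional tracing/diagnostic switches: PTD and URB tracing, extra-verbose logging and register dumps. */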
0053 #undef PTD_TRACE
0054 #undef URB_TRACE
0055 #undef VERBOSE
0056 #undef REGISTERS
0057
0058
0059
0060
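/* Optional self-test of the controller's internal buffer memory. */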
0061 #undef CHIP_BUFFER_TEST
0062
0063 #include <linux/module.h>
0064 #include <linux/moduleparam.h>
0065 #include <linux/kernel.h>
0066 #include <linux/delay.h>
0067 #include <linux/ioport.h>
0068 #include <linux/sched.h>
0069 #include <linux/slab.h>
0070 #include <linux/errno.h>
0071 #include <linux/list.h>
0072 #include <linux/interrupt.h>
0073 #include <linux/usb.h>
0074 #include <linux/usb/isp1362.h>
0075 #include <linux/usb/hcd.h>
0076 #include <linux/platform_device.h>
0077 #include <linux/pm.h>
0078 #include <linux/io.h>
0079 #include <linux/bitmap.h>
0080 #include <linux/prefetch.h>
0081 #include <linux/debugfs.h>
0082 #include <linux/seq_file.h>
0083
0084 #include <asm/irq.h>
0085 #include <asm/byteorder.h>
0086 #include <asm/unaligned.h>
0087
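/*
 * Verbosity of the DBG() diagnostics. With ISP1362_DEBUG defined the
 * parameter is also writable at runtime (mode 0644); otherwise it can only
 * be set at module load time, e.g. (illustrative invocation):
 *   modprobe isp1362-hcd dbg_level=1
 */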
0088 static int dbg_level;
0089 #ifdef ISP1362_DEBUG
0090 module_param(dbg_level, int, 0644);
0091 #else
0092 module_param(dbg_level, int, 0);
0093 #endif
0094
0095 #include "../core/usb.h"
0096 #include "isp1362.h"
0097
0098
0099 #define DRIVER_VERSION "2005-04-04"
0100 #define DRIVER_DESC "ISP1362 USB Host Controller Driver"
0101
0102 MODULE_DESCRIPTION(DRIVER_DESC);
0103 MODULE_LICENSE("GPL");
0104
0105 static const char hcd_name[] = "isp1362-hcd";
0106
0107 static void isp1362_hc_stop(struct usb_hcd *hcd);
0108 static int isp1362_hc_start(struct usb_hcd *hcd);
0109
0110
0111
0112
0113
0114
0115
0116
0117
0118
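/*
 * Enable additional microprocessor interrupt sources. Pending bits among
 * the newly enabled sources are acknowledged first; HCuPINTENB itself is
 * only rewritten when no interrupt is currently being serviced (the IRQ
 * handler restores it from ->irqenb on exit).
 */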
0119 static inline void isp1362_enable_int(struct isp1362_hcd *isp1362_hcd, u16 mask)
0120 {
0121 if ((isp1362_hcd->irqenb | mask) == isp1362_hcd->irqenb)
0122 return;
0123 if (mask & ~isp1362_hcd->irqenb)
0124 isp1362_write_reg16(isp1362_hcd, HCuPINT, mask & ~isp1362_hcd->irqenb);
0125 isp1362_hcd->irqenb |= mask;
0126 if (isp1362_hcd->irq_active)
0127 return;
0128 isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
0129 }
0130
0131
0132
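/* Map a PTD buffer offset to the queue (ISTL0, ISTL1, INTL or ATL) that owns it. */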
0133 static inline struct isp1362_ep_queue *get_ptd_queue(struct isp1362_hcd *isp1362_hcd,
0134 u16 offset)
0135 {
0136 struct isp1362_ep_queue *epq = NULL;
0137
0138 if (offset < isp1362_hcd->istl_queue[1].buf_start)
0139 epq = &isp1362_hcd->istl_queue[0];
0140 else if (offset < isp1362_hcd->intl_queue.buf_start)
0141 epq = &isp1362_hcd->istl_queue[1];
0142 else if (offset < isp1362_hcd->atl_queue.buf_start)
0143 epq = &isp1362_hcd->intl_queue;
0144 else if (offset < isp1362_hcd->atl_queue.buf_start +
0145 isp1362_hcd->atl_queue.buf_size)
0146 epq = &isp1362_hcd->atl_queue;
0147
0148 if (epq)
0149 DBG(1, "%s: PTD $%04x is on %s queue\n", __func__, offset, epq->name);
0150 else
0151 pr_warn("%s: invalid PTD $%04x\n", __func__, offset);
0152
0153 return epq;
0154 }
0155
0156 static inline int get_ptd_offset(struct isp1362_ep_queue *epq, u8 index)
0157 {
0158 int offset;
0159
0160 if (index * epq->blk_size > epq->buf_size) {
0161 pr_warn("%s: Bad %s index %d(%d)\n",
0162 __func__, epq->name, index,
0163 epq->buf_size / epq->blk_size);
0164 return -EINVAL;
0165 }
0166 offset = epq->buf_start + index * epq->blk_size;
0167 DBG(3, "%s: %s PTD[%02x] # %04x\n", __func__, epq->name, index, offset);
0168
0169 return offset;
0170 }
0171
0172
0173
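/*
 * Clamp a transfer to the space left in the queue's PTD buffer and, for
 * partial transfers, round down to a multiple of the max packet size.
 */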
0174 static inline u16 max_transfer_size(struct isp1362_ep_queue *epq, size_t size,
0175 int mps)
0176 {
0177 u16 xfer_size = min_t(size_t, MAX_XFER_SIZE, size);
0178
0179 xfer_size = min_t(size_t, xfer_size, epq->buf_avail * epq->blk_size - PTD_HEADER_SIZE);
0180 if (xfer_size < size && xfer_size % mps)
0181 xfer_size -= xfer_size % mps;
0182
0183 return xfer_size;
0184 }
0185
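/* Reserve a contiguous run of PTD buffer blocks for this endpoint's next transfer. */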
0186 static int claim_ptd_buffers(struct isp1362_ep_queue *epq,
0187 struct isp1362_ep *ep, u16 len)
0188 {
0189 int ptd_offset = -EINVAL;
0190 int num_ptds = ((len + PTD_HEADER_SIZE - 1) / epq->blk_size) + 1;
0191 int found;
0192
0193 BUG_ON(len > epq->buf_size);
0194
0195 if (!epq->buf_avail)
0196 return -ENOMEM;
0197
0198 if (ep->num_ptds)
0199 pr_err("%s: %s len %d/%d num_ptds %d buf_map %08lx skip_map %08lx\n", __func__,
0200 epq->name, len, epq->blk_size, num_ptds, epq->buf_map, epq->skip_map);
0201 BUG_ON(ep->num_ptds != 0);
0202
0203 found = bitmap_find_next_zero_area(&epq->buf_map, epq->buf_count, 0,
0204 num_ptds, 0);
0205 if (found >= epq->buf_count)
0206 return -EOVERFLOW;
0207
0208 DBG(1, "%s: Found %d PTDs[%d] for %d/%d byte\n", __func__,
0209 num_ptds, found, len, (int)(epq->blk_size - PTD_HEADER_SIZE));
0210 ptd_offset = get_ptd_offset(epq, found);
0211 WARN_ON(ptd_offset < 0);
0212 ep->ptd_offset = ptd_offset;
0213 ep->num_ptds += num_ptds;
0214 epq->buf_avail -= num_ptds;
0215 BUG_ON(epq->buf_avail > epq->buf_count);
0216 ep->ptd_index = found;
0217 bitmap_set(&epq->buf_map, found, num_ptds);
0218 DBG(1, "%s: Done %s PTD[%d] $%04x, avail %d count %d claimed %d %08lx:%08lx\n",
0219 __func__, epq->name, ep->ptd_index, ep->ptd_offset,
0220 epq->buf_avail, epq->buf_count, num_ptds, epq->buf_map, epq->skip_map);
0221
0222 return found;
0223 }
0224
0225 static inline void release_ptd_buffers(struct isp1362_ep_queue *epq, struct isp1362_ep *ep)
0226 {
0227 int last = ep->ptd_index + ep->num_ptds;
0228
0229 if (last > epq->buf_count)
0230 pr_err("%s: ep %p req %d len %d %s PTD[%d] $%04x num_ptds %d buf_count %d buf_avail %d buf_map %08lx skip_map %08lx\n",
0231 __func__, ep, ep->num_req, ep->length, epq->name, ep->ptd_index,
0232 ep->ptd_offset, ep->num_ptds, epq->buf_count, epq->buf_avail,
0233 epq->buf_map, epq->skip_map);
0234 BUG_ON(last > epq->buf_count);
0235
0236 bitmap_clear(&epq->buf_map, ep->ptd_index, ep->num_ptds);
0237 bitmap_set(&epq->skip_map, ep->ptd_index, ep->num_ptds);
0238 epq->buf_avail += ep->num_ptds;
0239 epq->ptd_count--;
0240
0241 BUG_ON(epq->buf_avail > epq->buf_count);
0242 BUG_ON(epq->ptd_count > epq->buf_count);
0243
0244 DBG(1, "%s: Done %s PTDs $%04x released %d avail %d count %d\n",
0245 __func__, epq->name,
0246 ep->ptd_offset, ep->num_ptds, epq->buf_avail, epq->buf_count);
0247 DBG(1, "%s: buf_map %08lx skip_map %08lx\n", __func__,
0248 epq->buf_map, epq->skip_map);
0249
0250 ep->num_ptds = 0;
0251 ep->ptd_offset = -EINVAL;
0252 ep->ptd_index = -EINVAL;
0253 }
0254
0255
0256
0257
0258
0259
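/*
 * Fill in the PTD for the endpoint's next transaction (SETUP, IN, OUT or
 * status/ACK phase) and point ep->data at the matching part of the URB
 * buffer.
 */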
0260 static void prepare_ptd(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
0261 struct isp1362_ep *ep, struct isp1362_ep_queue *epq,
0262 u16 fno)
0263 {
0264 struct ptd *ptd;
0265 int toggle;
0266 int dir;
0267 u16 len;
0268 size_t buf_len = urb->transfer_buffer_length - urb->actual_length;
0269
0270 DBG(3, "%s: %s ep %p\n", __func__, epq->name, ep);
0271
0272 ptd = &ep->ptd;
0273
0274 ep->data = (unsigned char *)urb->transfer_buffer + urb->actual_length;
0275
0276 switch (ep->nextpid) {
0277 case USB_PID_IN:
0278 toggle = usb_gettoggle(urb->dev, ep->epnum, 0);
0279 dir = PTD_DIR_IN;
0280 if (usb_pipecontrol(urb->pipe)) {
0281 len = min_t(size_t, ep->maxpacket, buf_len);
0282 } else if (usb_pipeisoc(urb->pipe)) {
0283 len = min_t(size_t, urb->iso_frame_desc[fno].length, MAX_XFER_SIZE);
0284 ep->data = urb->transfer_buffer + urb->iso_frame_desc[fno].offset;
0285 } else
0286 len = max_transfer_size(epq, buf_len, ep->maxpacket);
0287 DBG(1, "%s: IN len %d/%d/%d from URB\n", __func__, len, ep->maxpacket,
0288 (int)buf_len);
0289 break;
0290 case USB_PID_OUT:
0291 toggle = usb_gettoggle(urb->dev, ep->epnum, 1);
0292 dir = PTD_DIR_OUT;
0293 if (usb_pipecontrol(urb->pipe))
0294 len = min_t(size_t, ep->maxpacket, buf_len);
0295 else if (usb_pipeisoc(urb->pipe))
0296 len = min_t(size_t, urb->iso_frame_desc[0].length, MAX_XFER_SIZE);
0297 else
0298 len = max_transfer_size(epq, buf_len, ep->maxpacket);
0299 if (len == 0)
0300 pr_info("%s: Sending ZERO packet: %d\n", __func__,
0301 urb->transfer_flags & URB_ZERO_PACKET);
0302 DBG(1, "%s: OUT len %d/%d/%d from URB\n", __func__, len, ep->maxpacket,
0303 (int)buf_len);
0304 break;
0305 case USB_PID_SETUP:
0306 toggle = 0;
0307 dir = PTD_DIR_SETUP;
0308 len = sizeof(struct usb_ctrlrequest);
0309 DBG(1, "%s: SETUP len %d\n", __func__, len);
0310 ep->data = urb->setup_packet;
0311 break;
0312 case USB_PID_ACK:
0313 toggle = 1;
0314 len = 0;
0315 dir = (urb->transfer_buffer_length && usb_pipein(urb->pipe)) ?
0316 PTD_DIR_OUT : PTD_DIR_IN;
0317 DBG(1, "%s: ACK len %d\n", __func__, len);
0318 break;
0319 default:
0320 toggle = dir = len = 0;
0321 pr_err("%s@%d: ep->nextpid %02x\n", __func__, __LINE__, ep->nextpid);
0322 BUG_ON(1);
0323 }
0324
0325 ep->length = len;
0326 if (!len)
0327 ep->data = NULL;
0328
0329 ptd->count = PTD_CC_MSK | PTD_ACTIVE_MSK | PTD_TOGGLE(toggle);
0330 ptd->mps = PTD_MPS(ep->maxpacket) | PTD_SPD(urb->dev->speed == USB_SPEED_LOW) |
0331 PTD_EP(ep->epnum);
0332 ptd->len = PTD_LEN(len) | PTD_DIR(dir);
0333 ptd->faddr = PTD_FA(usb_pipedevice(urb->pipe));
0334
0335 if (usb_pipeint(urb->pipe)) {
0336 ptd->faddr |= PTD_SF_INT(ep->branch);
0337 ptd->faddr |= PTD_PR(ep->interval ? __ffs(ep->interval) : 0);
0338 }
0339 if (usb_pipeisoc(urb->pipe))
0340 ptd->faddr |= PTD_SF_ISO(fno);
0341
0342 DBG(1, "%s: Finished\n", __func__);
0343 }
0344
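/* Copy the PTD header (plus, for OUT/SETUP transfers, the payload) into chip buffer memory. */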
0345 static void isp1362_write_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
0346 struct isp1362_ep_queue *epq)
0347 {
0348 struct ptd *ptd = &ep->ptd;
0349 int len = PTD_GET_DIR(ptd) == PTD_DIR_IN ? 0 : ep->length;
0350
0351 prefetch(ptd);
0352 isp1362_write_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
0353 if (len)
0354 isp1362_write_buffer(isp1362_hcd, ep->data,
0355 ep->ptd_offset + PTD_HEADER_SIZE, len);
0356
0357 dump_ptd(ptd);
0358 dump_ptd_out_data(ptd, ep->data);
0359 }
0360
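/* Read back the completed PTD header and, for IN transfers, the received payload. */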
0361 static void isp1362_read_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
0362 struct isp1362_ep_queue *epq)
0363 {
0364 struct ptd *ptd = &ep->ptd;
0365 int act_len;
0366
0367 WARN_ON(list_empty(&ep->active));
0368 BUG_ON(ep->ptd_offset < 0);
0369
0370 list_del_init(&ep->active);
0371 DBG(1, "%s: ep %p removed from active list %p\n", __func__, ep, &epq->active);
0372
0373 prefetchw(ptd);
0374 isp1362_read_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
0375 dump_ptd(ptd);
0376 act_len = PTD_GET_COUNT(ptd);
0377 if (PTD_GET_DIR(ptd) != PTD_DIR_IN || act_len == 0)
0378 return;
0379 if (act_len > ep->length)
0380 pr_err("%s: ep %p PTD $%04x act_len %d ep->length %d\n", __func__, ep,
0381 ep->ptd_offset, act_len, ep->length);
0382 BUG_ON(act_len > ep->length);
0383
0384
0385
0386
0387 prefetchw(ep->data);
0388 isp1362_read_buffer(isp1362_hcd, ep->data,
0389 ep->ptd_offset + PTD_HEADER_SIZE, act_len);
0390 dump_ptd_in_data(ptd, ep->data);
0391 }
0392
0393
0394
0395
0396
0397
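/*
 * Take a PTD out of the hardware schedule: queue the endpoint on the
 * remove_list, set its skip bit so the HC ignores the PTD, and let the
 * next SOF interrupt complete the cleanup.
 */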
0398 static void remove_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)
0399
0400 {
0401 int index;
0402 struct isp1362_ep_queue *epq;
0403
0404 DBG(1, "%s: ep %p PTD[%d] $%04x\n", __func__, ep, ep->ptd_index, ep->ptd_offset);
0405 BUG_ON(ep->ptd_offset < 0);
0406
0407 epq = get_ptd_queue(isp1362_hcd, ep->ptd_offset);
0408 BUG_ON(!epq);
0409
0410
0411 WARN_ON(!list_empty(&ep->remove_list));
0412 list_add_tail(&ep->remove_list, &isp1362_hcd->remove_list);
0413
0414 isp1362_enable_int(isp1362_hcd, HCuPINT_SOF);
0415
0416 index = ep->ptd_index;
0417 if (index < 0)
0418
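/* ISO PTDs have no index (and no skip map); nothing more to disable here */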
0419 return;
0420
0421 DBG(1, "%s: Disabling PTD[%02x] $%04x %08lx|%08x\n", __func__,
0422 index, ep->ptd_offset, epq->skip_map, 1 << index);
0423
0424
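/* tell the HC to skip this PTD until its buffer space is released */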
0425 epq->skip_map |= 1 << index;
0426 if (epq == &isp1362_hcd->atl_queue) {
0427 DBG(2, "%s: ATLSKIP = %08x -> %08lx\n", __func__,
0428 isp1362_read_reg32(isp1362_hcd, HCATLSKIP), epq->skip_map);
0429 isp1362_write_reg32(isp1362_hcd, HCATLSKIP, epq->skip_map);
0430 if (~epq->skip_map == 0)
0431 isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
0432 } else if (epq == &isp1362_hcd->intl_queue) {
0433 DBG(2, "%s: INTLSKIP = %08x -> %08lx\n", __func__,
0434 isp1362_read_reg32(isp1362_hcd, HCINTLSKIP), epq->skip_map);
0435 isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, epq->skip_map);
0436 if (~epq->skip_map == 0)
0437 isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
0438 }
0439 }
0440
0441
0442
0443
0444
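/*
 * Complete an URB: unlink it and hand it back to the USB core (dropping
 * the HCD lock around the callback); if the endpoint has no more queued
 * URBs, remove it from its schedule and release its periodic bandwidth.
 */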
0445 static void finish_request(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
0446 struct urb *urb, int status)
0447 __releases(isp1362_hcd->lock)
0448 __acquires(isp1362_hcd->lock)
0449 {
0450 urb->hcpriv = NULL;
0451 ep->error_count = 0;
0452
0453 if (usb_pipecontrol(urb->pipe))
0454 ep->nextpid = USB_PID_SETUP;
0455
0456 URB_DBG("%s: req %d FA %d ep%d%s %s: len %d/%d %s stat %d\n", __func__,
0457 ep->num_req, usb_pipedevice(urb->pipe),
0458 usb_pipeendpoint(urb->pipe),
0459 !usb_pipein(urb->pipe) ? "out" : "in",
0460 usb_pipecontrol(urb->pipe) ? "ctrl" :
0461 usb_pipeint(urb->pipe) ? "int" :
0462 usb_pipebulk(urb->pipe) ? "bulk" :
0463 "iso",
0464 urb->actual_length, urb->transfer_buffer_length,
0465 !(urb->transfer_flags & URB_SHORT_NOT_OK) ?
0466 "short_ok" : "", urb->status);
0467
0468
0469 usb_hcd_unlink_urb_from_ep(isp1362_hcd_to_hcd(isp1362_hcd), urb);
0470 spin_unlock(&isp1362_hcd->lock);
0471 usb_hcd_giveback_urb(isp1362_hcd_to_hcd(isp1362_hcd), urb, status);
0472 spin_lock(&isp1362_hcd->lock);
0473
0474
0475 if (!list_empty(&ep->hep->urb_list))
0476 return;
0477
0478
0479 if (!list_empty(&ep->schedule)) {
0480 list_del_init(&ep->schedule);
0481 return;
0482 }
0483
0484
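/* release the periodic bandwidth reserved for this endpoint */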
0485 if (ep->interval) {
0486
0487 DBG(1, "deschedule qh%d/%p branch %d load %d bandwidth %d -> %d\n", ep->interval,
0488 ep, ep->branch, ep->load,
0489 isp1362_hcd->load[ep->branch],
0490 isp1362_hcd->load[ep->branch] - ep->load);
0491 isp1362_hcd->load[ep->branch] -= ep->load;
0492 ep->branch = PERIODIC_SIZE;
0493 }
0494 }
0495
0496
0497
0498
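/*
 * Post-process a completed PTD: decode the completion code, update the
 * data toggle and actual_length, advance the (control) transfer state
 * machine and finish the URB when it is complete or has failed.
 */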
0499 static void postproc_ep(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)
0500 {
0501 struct urb *urb = get_urb(ep);
0502 struct usb_device *udev;
0503 struct ptd *ptd;
0504 int short_ok;
0505 u16 len;
0506 int urbstat = -EINPROGRESS;
0507 u8 cc;
0508
0509 DBG(2, "%s: ep %p req %d\n", __func__, ep, ep->num_req);
0510
0511 udev = urb->dev;
0512 ptd = &ep->ptd;
0513 cc = PTD_GET_CC(ptd);
0514 if (cc == PTD_NOTACCESSED) {
0515 pr_err("%s: req %d PTD %p Untouched by ISP1362\n", __func__,
0516 ep->num_req, ptd);
0517 cc = PTD_DEVNOTRESP;
0518 }
0519
0520 short_ok = !(urb->transfer_flags & URB_SHORT_NOT_OK);
0521 len = urb->transfer_buffer_length - urb->actual_length;
0522
0523
0524
0525
0526
0527
0528
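/* A data underrun is only an error if the URB did not allow short transfers (URB_SHORT_NOT_OK). */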
0529 if (cc == PTD_DATAUNDERRUN) {
0530 if (short_ok) {
0531 DBG(1, "%s: req %d Allowed data underrun short_%sok %d/%d/%d byte\n",
0532 __func__, ep->num_req, short_ok ? "" : "not_",
0533 PTD_GET_COUNT(ptd), ep->maxpacket, len);
0534 cc = PTD_CC_NOERROR;
0535 urbstat = 0;
0536 } else {
0537 DBG(1, "%s: req %d Data Underrun %s nextpid %02x short_%sok %d/%d/%d byte\n",
0538 __func__, ep->num_req,
0539 usb_pipein(urb->pipe) ? "IN" : "OUT", ep->nextpid,
0540 short_ok ? "" : "not_",
0541 PTD_GET_COUNT(ptd), ep->maxpacket, len);
0542
0543
0544
0545 urb->actual_length += PTD_GET_COUNT(ptd);
0546 if (usb_pipecontrol(urb->pipe)) {
0547 ep->nextpid = USB_PID_ACK;
0548 BUG_ON(urb->actual_length > urb->transfer_buffer_length);
0549
0550 if (urb->status == -EINPROGRESS)
0551 urb->status = cc_to_error[PTD_DATAUNDERRUN];
0552 } else {
0553 usb_settoggle(udev, ep->epnum, ep->nextpid == USB_PID_OUT,
0554 PTD_GET_TOGGLE(ptd));
0555 urbstat = cc_to_error[PTD_DATAUNDERRUN];
0556 }
0557 goto out;
0558 }
0559 }
0560
0561 if (cc != PTD_CC_NOERROR) {
0562 if (++ep->error_count >= 3 || cc == PTD_CC_STALL || cc == PTD_DATAOVERRUN) {
0563 urbstat = cc_to_error[cc];
0564 DBG(1, "%s: req %d nextpid %02x, status %d, error %d, error_count %d\n",
0565 __func__, ep->num_req, ep->nextpid, urbstat, cc,
0566 ep->error_count);
0567 }
0568 goto out;
0569 }
0570
0571 switch (ep->nextpid) {
0572 case USB_PID_OUT:
0573 if (PTD_GET_COUNT(ptd) != ep->length)
0574 pr_err("%s: count=%d len=%d\n", __func__,
0575 PTD_GET_COUNT(ptd), ep->length);
0576 BUG_ON(PTD_GET_COUNT(ptd) != ep->length);
0577 urb->actual_length += ep->length;
0578 BUG_ON(urb->actual_length > urb->transfer_buffer_length);
0579 usb_settoggle(udev, ep->epnum, 1, PTD_GET_TOGGLE(ptd));
0580 if (urb->actual_length == urb->transfer_buffer_length) {
0581 DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__,
0582 ep->num_req, len, ep->maxpacket, urbstat);
0583 if (usb_pipecontrol(urb->pipe)) {
0584 DBG(3, "%s: req %d %s Wait for ACK\n", __func__,
0585 ep->num_req,
0586 usb_pipein(urb->pipe) ? "IN" : "OUT");
0587 ep->nextpid = USB_PID_ACK;
0588 } else {
0589 if (len % ep->maxpacket ||
0590 !(urb->transfer_flags & URB_ZERO_PACKET)) {
0591 urbstat = 0;
0592 DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
0593 __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT",
0594 urbstat, len, ep->maxpacket, urb->actual_length);
0595 }
0596 }
0597 }
0598 break;
0599 case USB_PID_IN:
0600 len = PTD_GET_COUNT(ptd);
0601 BUG_ON(len > ep->length);
0602 urb->actual_length += len;
0603 BUG_ON(urb->actual_length > urb->transfer_buffer_length);
0604 usb_settoggle(udev, ep->epnum, 0, PTD_GET_TOGGLE(ptd));
0605
0606 if ((urb->transfer_buffer_length == urb->actual_length) ||
0607 len % ep->maxpacket) {
0608 DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__,
0609 ep->num_req, len, ep->maxpacket, urbstat);
0610 if (usb_pipecontrol(urb->pipe)) {
0611 DBG(3, "%s: req %d %s Wait for ACK\n", __func__,
0612 ep->num_req,
0613 usb_pipein(urb->pipe) ? "IN" : "OUT");
0614 ep->nextpid = USB_PID_ACK;
0615 } else {
0616 urbstat = 0;
0617 DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
0618 __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT",
0619 urbstat, len, ep->maxpacket, urb->actual_length);
0620 }
0621 }
0622 break;
0623 case USB_PID_SETUP:
0624 if (urb->transfer_buffer_length == urb->actual_length) {
0625 ep->nextpid = USB_PID_ACK;
0626 } else if (usb_pipeout(urb->pipe)) {
0627 usb_settoggle(udev, 0, 1, 1);
0628 ep->nextpid = USB_PID_OUT;
0629 } else {
0630 usb_settoggle(udev, 0, 0, 1);
0631 ep->nextpid = USB_PID_IN;
0632 }
0633 break;
0634 case USB_PID_ACK:
0635 DBG(3, "%s: req %d got ACK %d -> 0\n", __func__, ep->num_req,
0636 urbstat);
0637 WARN_ON(urbstat != -EINPROGRESS);
0638 urbstat = 0;
0639 ep->nextpid = 0;
0640 break;
0641 default:
0642 BUG_ON(1);
0643 }
0644
0645 out:
0646 if (urbstat != -EINPROGRESS) {
0647 DBG(2, "%s: Finishing ep %p req %d urb %p status %d\n", __func__,
0648 ep, ep->num_req, urb, urbstat);
0649 finish_request(isp1362_hcd, ep, urb, urbstat);
0650 }
0651 }
0652
0653 static void finish_unlinks(struct isp1362_hcd *isp1362_hcd)
0654 {
0655 struct isp1362_ep *ep;
0656 struct isp1362_ep *tmp;
0657
0658 list_for_each_entry_safe(ep, tmp, &isp1362_hcd->remove_list, remove_list) {
0659 struct isp1362_ep_queue *epq =
0660 get_ptd_queue(isp1362_hcd, ep->ptd_offset);
0661 int index = ep->ptd_index;
0662
0663 BUG_ON(epq == NULL);
0664 if (index >= 0) {
0665 DBG(1, "%s: remove PTD[%d] $%04x\n", __func__, index, ep->ptd_offset);
0666 BUG_ON(ep->num_ptds == 0);
0667 release_ptd_buffers(epq, ep);
0668 }
0669 if (!list_empty(&ep->hep->urb_list)) {
0670 struct urb *urb = get_urb(ep);
0671
0672 DBG(1, "%s: Finishing req %d ep %p from remove_list\n", __func__,
0673 ep->num_req, ep);
0674 finish_request(isp1362_hcd, ep, urb, -ESHUTDOWN);
0675 }
0676 WARN_ON(list_empty(&ep->active));
0677 if (!list_empty(&ep->active)) {
0678 list_del_init(&ep->active);
0679 DBG(1, "%s: ep %p removed from active list\n", __func__, ep);
0680 }
0681 list_del_init(&ep->remove_list);
0682 DBG(1, "%s: ep %p removed from remove_list\n", __func__, ep);
0683 }
0684 DBG(1, "%s: Done\n", __func__);
0685 }
0686
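/*
 * Buffer activation helpers: enable the matching done interrupt and set
 * the corresponding ACTIVE/FULL bits in HCBUFSTAT.
 */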
0687 static inline void enable_atl_transfers(struct isp1362_hcd *isp1362_hcd, int count)
0688 {
0689 if (count > 0) {
0690 if (count < isp1362_hcd->atl_queue.ptd_count)
0691 isp1362_write_reg16(isp1362_hcd, HCATLDTC, count);
0692 isp1362_enable_int(isp1362_hcd, HCuPINT_ATL);
0693 isp1362_write_reg32(isp1362_hcd, HCATLSKIP, isp1362_hcd->atl_queue.skip_map);
0694 isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
0695 } else
0696 isp1362_enable_int(isp1362_hcd, HCuPINT_SOF);
0697 }
0698
0699 static inline void enable_intl_transfers(struct isp1362_hcd *isp1362_hcd)
0700 {
0701 isp1362_enable_int(isp1362_hcd, HCuPINT_INTL);
0702 isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
0703 isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, isp1362_hcd->intl_queue.skip_map);
0704 }
0705
0706 static inline void enable_istl_transfers(struct isp1362_hcd *isp1362_hcd, int flip)
0707 {
0708 isp1362_enable_int(isp1362_hcd, flip ? HCuPINT_ISTL1 : HCuPINT_ISTL0);
0709 isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, flip ?
0710 HCBUFSTAT_ISTL1_FULL : HCBUFSTAT_ISTL0_FULL);
0711 }
0712
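/* Build and write a single PTD for the endpoint's next transaction and clear its skip bit. */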
0713 static int submit_req(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
0714 struct isp1362_ep *ep, struct isp1362_ep_queue *epq)
0715 {
0716 int index;
0717
0718 prepare_ptd(isp1362_hcd, urb, ep, epq, 0);
0719 index = claim_ptd_buffers(epq, ep, ep->length);
0720 if (index == -ENOMEM) {
0721 DBG(1, "%s: req %d No free %s PTD available: %d, %08lx:%08lx\n", __func__,
0722 ep->num_req, epq->name, ep->num_ptds, epq->buf_map, epq->skip_map);
0723 return index;
0724 } else if (index == -EOVERFLOW) {
0725 DBG(1, "%s: req %d Not enough space for %d byte %s PTD %d %08lx:%08lx\n",
0726 __func__, ep->num_req, ep->length, epq->name, ep->num_ptds,
0727 epq->buf_map, epq->skip_map);
0728 return index;
0729 } else
0730 BUG_ON(index < 0);
0731 list_add_tail(&ep->active, &epq->active);
0732 DBG(1, "%s: ep %p req %d len %d added to active list %p\n", __func__,
0733 ep, ep->num_req, ep->length, &epq->active);
0734 DBG(1, "%s: Submitting %s PTD $%04x for ep %p req %d\n", __func__, epq->name,
0735 ep->ptd_offset, ep, ep->num_req);
0736 isp1362_write_ptd(isp1362_hcd, ep, epq);
0737 __clear_bit(ep->ptd_index, &epq->skip_map);
0738
0739 return 0;
0740 }
0741
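/*
 * Walk the async (control/bulk) schedule and submit a PTD for every
 * endpoint that is not already active.
 */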
0742 static void start_atl_transfers(struct isp1362_hcd *isp1362_hcd)
0743 {
0744 int ptd_count = 0;
0745 struct isp1362_ep_queue *epq = &isp1362_hcd->atl_queue;
0746 struct isp1362_ep *ep;
0747 int defer = 0;
0748
0749 if (atomic_read(&epq->finishing)) {
0750 DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
0751 return;
0752 }
0753
0754 list_for_each_entry(ep, &isp1362_hcd->async, schedule) {
0755 struct urb *urb = get_urb(ep);
0756 int ret;
0757
0758 if (!list_empty(&ep->active)) {
0759 DBG(2, "%s: Skipping active %s ep %p\n", __func__, epq->name, ep);
0760 continue;
0761 }
0762
0763 DBG(1, "%s: Processing %s ep %p req %d\n", __func__, epq->name,
0764 ep, ep->num_req);
0765
0766 ret = submit_req(isp1362_hcd, urb, ep, epq);
0767 if (ret == -ENOMEM) {
0768 defer = 1;
0769 break;
0770 } else if (ret == -EOVERFLOW) {
0771 defer = 1;
0772 continue;
0773 }
0774 #ifdef BUGGY_PXA2XX_UDC_USBTEST
0775 defer = ep->nextpid == USB_PID_SETUP;
0776 #endif
0777 ptd_count++;
0778 }
0779
0780
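/* rotate the async schedule so endpoints later in the list are not starved */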
0781 if (isp1362_hcd->async.next != isp1362_hcd->async.prev) {
0782 DBG(2, "%s: Cycling ASYNC schedule %d\n", __func__, ptd_count);
0783 list_move(&isp1362_hcd->async, isp1362_hcd->async.next);
0784 }
0785 if (ptd_count || defer)
0786 enable_atl_transfers(isp1362_hcd, defer ? 0 : ptd_count);
0787
0788 epq->ptd_count += ptd_count;
0789 if (epq->ptd_count > epq->stat_maxptds) {
0790 epq->stat_maxptds = epq->ptd_count;
0791 DBG(0, "%s: max_ptds: %d\n", __func__, epq->stat_maxptds);
0792 }
0793 }
0794
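/* Submit PTDs for all idle endpoints on the periodic (interrupt) schedule. */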
0795 static void start_intl_transfers(struct isp1362_hcd *isp1362_hcd)
0796 {
0797 int ptd_count = 0;
0798 struct isp1362_ep_queue *epq = &isp1362_hcd->intl_queue;
0799 struct isp1362_ep *ep;
0800
0801 if (atomic_read(&epq->finishing)) {
0802 DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
0803 return;
0804 }
0805
0806 list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) {
0807 struct urb *urb = get_urb(ep);
0808 int ret;
0809
0810 if (!list_empty(&ep->active)) {
0811 DBG(1, "%s: Skipping active %s ep %p\n", __func__,
0812 epq->name, ep);
0813 continue;
0814 }
0815
0816 DBG(1, "%s: Processing %s ep %p req %d\n", __func__,
0817 epq->name, ep, ep->num_req);
0818 ret = submit_req(isp1362_hcd, urb, ep, epq);
0819 if (ret == -ENOMEM)
0820 break;
0821 else if (ret == -EOVERFLOW)
0822 continue;
0823 ptd_count++;
0824 }
0825
0826 if (ptd_count) {
0827 static int last_count;
0828
0829 if (ptd_count != last_count) {
0830 DBG(0, "%s: ptd_count: %d\n", __func__, ptd_count);
0831 last_count = ptd_count;
0832 }
0833 enable_intl_transfers(isp1362_hcd);
0834 }
0835
0836 epq->ptd_count += ptd_count;
0837 if (epq->ptd_count > epq->stat_maxptds)
0838 epq->stat_maxptds = epq->ptd_count;
0839 }
0840
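/* Offset of the next free ISO PTD slot following this endpoint's transfer, or -ENOMEM if the buffer is exhausted. */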
0841 static inline int next_ptd(struct isp1362_ep_queue *epq, struct isp1362_ep *ep)
0842 {
0843 u16 ptd_offset = ep->ptd_offset;
0844 int num_ptds = (ep->length + PTD_HEADER_SIZE + (epq->blk_size - 1)) / epq->blk_size;
0845
0846 DBG(2, "%s: PTD offset $%04x + %04x => %d * %04x -> $%04x\n", __func__, ptd_offset,
0847 ep->length, num_ptds, epq->blk_size, ptd_offset + num_ptds * epq->blk_size);
0848
0849 ptd_offset += num_ptds * epq->blk_size;
0850 if (ptd_offset < epq->buf_start + epq->buf_size)
0851 return ptd_offset;
0852 else
0853 return -ENOMEM;
0854 }
0855
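/*
 * Fill the currently inactive ISTL buffer with PTDs for all due
 * isochronous endpoints; if the other buffer is idle as well, flip and
 * fill it for the following frame too.
 */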
0856 static void start_iso_transfers(struct isp1362_hcd *isp1362_hcd)
0857 {
0858 int ptd_count = 0;
0859 int flip = isp1362_hcd->istl_flip;
0860 struct isp1362_ep_queue *epq;
0861 int ptd_offset;
0862 struct isp1362_ep *ep;
0863 struct isp1362_ep *tmp;
0864 u16 fno = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
0865
0866 fill2:
0867 epq = &isp1362_hcd->istl_queue[flip];
0868 if (atomic_read(&epq->finishing)) {
0869 DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
0870 return;
0871 }
0872
0873 if (!list_empty(&epq->active))
0874 return;
0875
0876 ptd_offset = epq->buf_start;
0877 list_for_each_entry_safe(ep, tmp, &isp1362_hcd->isoc, schedule) {
0878 struct urb *urb = get_urb(ep);
0879 s16 diff = fno - (u16)urb->start_frame;
0880
0881 DBG(1, "%s: Processing %s ep %p\n", __func__, epq->name, ep);
0882
0883 if (diff > urb->number_of_packets) {
0884
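/* too late: the URB's frame window has already passed */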
0885 finish_request(isp1362_hcd, ep, urb, -EOVERFLOW);
0886 continue;
0887 } else if (diff < -1) {
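/* too early: not due within the next frame, leave it queued */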
0888
0889
0890
0891
0892
0893 } else if (diff == -1) {
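/* due next frame: build and queue its PTD now */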
0894
0895 prepare_ptd(isp1362_hcd, urb, ep, epq, fno);
0896 if (ptd_offset + PTD_HEADER_SIZE + ep->length >
0897 epq->buf_start + epq->buf_size) {
0898 pr_err("%s: Not enough ISO buffer space for %d byte PTD\n",
0899 __func__, ep->length);
0900 continue;
0901 }
0902 ep->ptd_offset = ptd_offset;
0903 list_add_tail(&ep->active, &epq->active);
0904
0905 ptd_offset = next_ptd(epq, ep);
0906 if (ptd_offset < 0) {
0907 pr_warn("%s: req %d No more %s PTD buffers available\n",
0908 __func__, ep->num_req, epq->name);
0909 break;
0910 }
0911 }
0912 }
0913 list_for_each_entry(ep, &epq->active, active) {
0914 if (epq->active.next == &ep->active)
0915 ep->ptd.mps |= PTD_LAST_MSK;
0916 isp1362_write_ptd(isp1362_hcd, ep, epq);
0917 ptd_count++;
0918 }
0919
0920 if (ptd_count)
0921 enable_istl_transfers(isp1362_hcd, flip);
0922
0923 epq->ptd_count += ptd_count;
0924 if (epq->ptd_count > epq->stat_maxptds)
0925 epq->stat_maxptds = epq->ptd_count;
0926
0927
0928 if (!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
0929 (flip ? HCBUFSTAT_ISTL0_FULL : HCBUFSTAT_ISTL1_FULL))) {
0930 fno++;
0931 ptd_count = 0;
0932 flip = 1 - flip;
0933 goto fill2;
0934 }
0935 }
0936
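/*
 * Process the done map of the ATL or INTL queue: read back each completed
 * PTD, release its buffer space and post-process the endpoint.
 */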
0937 static void finish_transfers(struct isp1362_hcd *isp1362_hcd, unsigned long done_map,
0938 struct isp1362_ep_queue *epq)
0939 {
0940 struct isp1362_ep *ep;
0941 struct isp1362_ep *tmp;
0942
0943 if (list_empty(&epq->active)) {
0944 DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name);
0945 return;
0946 }
0947
0948 DBG(1, "%s: Finishing %s transfers %08lx\n", __func__, epq->name, done_map);
0949
0950 atomic_inc(&epq->finishing);
0951 list_for_each_entry_safe(ep, tmp, &epq->active, active) {
0952 int index = ep->ptd_index;
0953
0954 DBG(1, "%s: Checking %s PTD[%02x] $%04x\n", __func__, epq->name,
0955 index, ep->ptd_offset);
0956
0957 BUG_ON(index < 0);
0958 if (__test_and_clear_bit(index, &done_map)) {
0959 isp1362_read_ptd(isp1362_hcd, ep, epq);
0960 epq->free_ptd = index;
0961 BUG_ON(ep->num_ptds == 0);
0962 release_ptd_buffers(epq, ep);
0963
0964 DBG(1, "%s: ep %p req %d removed from active list\n", __func__,
0965 ep, ep->num_req);
0966 if (!list_empty(&ep->remove_list)) {
0967 list_del_init(&ep->remove_list);
0968 DBG(1, "%s: ep %p removed from remove list\n", __func__, ep);
0969 }
0970 DBG(1, "%s: Postprocessing %s ep %p req %d\n", __func__, epq->name,
0971 ep, ep->num_req);
0972 postproc_ep(isp1362_hcd, ep);
0973 }
0974 if (!done_map)
0975 break;
0976 }
0977 if (done_map)
0978 pr_warn("%s: done_map not clear: %08lx:%08lx\n",
0979 __func__, done_map, epq->skip_map);
0980 atomic_dec(&epq->finishing);
0981 }
0982
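/* Post-process every PTD of an ISTL buffer once the chip reports it done. */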
0983 static void finish_iso_transfers(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep_queue *epq)
0984 {
0985 struct isp1362_ep *ep;
0986 struct isp1362_ep *tmp;
0987
0988 if (list_empty(&epq->active)) {
0989 DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name);
0990 return;
0991 }
0992
0993 DBG(1, "%s: Finishing %s transfers\n", __func__, epq->name);
0994
0995 atomic_inc(&epq->finishing);
0996 list_for_each_entry_safe(ep, tmp, &epq->active, active) {
0997 DBG(1, "%s: Checking PTD $%04x\n", __func__, ep->ptd_offset);
0998
0999 isp1362_read_ptd(isp1362_hcd, ep, epq);
1000 DBG(1, "%s: Postprocessing %s ep %p\n", __func__, epq->name, ep);
1001 postproc_ep(isp1362_hcd, ep);
1002 }
1003 WARN_ON(epq->blk_size != 0);
1004 atomic_dec(&epq->finishing);
1005 }
1006
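/*
 * Interrupt handler. All microprocessor interrupt sources are masked while
 * the handler runs and re-enabled from ->irqenb on exit.
 */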
1007 static irqreturn_t isp1362_irq(struct usb_hcd *hcd)
1008 {
1009 int handled = 0;
1010 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1011 u16 irqstat;
1012 u16 svc_mask;
1013
1014 spin_lock(&isp1362_hcd->lock);
1015
1016 BUG_ON(isp1362_hcd->irq_active++);
1017
1018 isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
1019
1020 irqstat = isp1362_read_reg16(isp1362_hcd, HCuPINT);
1021 DBG(3, "%s: got IRQ %04x:%04x\n", __func__, irqstat, isp1362_hcd->irqenb);
1022
1023
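/* only service interrupt sources that are currently enabled */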
1024 irqstat &= isp1362_hcd->irqenb;
1025 isp1362_write_reg16(isp1362_hcd, HCuPINT, irqstat);
1026 svc_mask = irqstat;
1027
1028 if (irqstat & HCuPINT_SOF) {
1029 isp1362_hcd->irqenb &= ~HCuPINT_SOF;
1030 isp1362_hcd->irq_stat[ISP1362_INT_SOF]++;
1031 handled = 1;
1032 svc_mask &= ~HCuPINT_SOF;
1033 DBG(3, "%s: SOF\n", __func__);
1034 isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
1035 if (!list_empty(&isp1362_hcd->remove_list))
1036 finish_unlinks(isp1362_hcd);
1037 if (!list_empty(&isp1362_hcd->async) && !(irqstat & HCuPINT_ATL)) {
1038 if (list_empty(&isp1362_hcd->atl_queue.active)) {
1039 start_atl_transfers(isp1362_hcd);
1040 } else {
1041 isp1362_enable_int(isp1362_hcd, HCuPINT_ATL);
1042 isp1362_write_reg32(isp1362_hcd, HCATLSKIP,
1043 isp1362_hcd->atl_queue.skip_map);
1044 isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
1045 }
1046 }
1047 }
1048
1049 if (irqstat & HCuPINT_ISTL0) {
1050 isp1362_hcd->irq_stat[ISP1362_INT_ISTL0]++;
1051 handled = 1;
1052 svc_mask &= ~HCuPINT_ISTL0;
1053 isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL0_FULL);
1054 DBG(1, "%s: ISTL0\n", __func__);
1055 WARN_ON((int)!!isp1362_hcd->istl_flip);
1056 WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
1057 HCBUFSTAT_ISTL0_ACTIVE);
1058 WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
1059 HCBUFSTAT_ISTL0_DONE));
1060 isp1362_hcd->irqenb &= ~HCuPINT_ISTL0;
1061 }
1062
1063 if (irqstat & HCuPINT_ISTL1) {
1064 isp1362_hcd->irq_stat[ISP1362_INT_ISTL1]++;
1065 handled = 1;
1066 svc_mask &= ~HCuPINT_ISTL1;
1067 isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL1_FULL);
1068 DBG(1, "%s: ISTL1\n", __func__);
1069 WARN_ON(!(int)isp1362_hcd->istl_flip);
1070 WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
1071 HCBUFSTAT_ISTL1_ACTIVE);
1072 WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
1073 HCBUFSTAT_ISTL1_DONE));
1074 isp1362_hcd->irqenb &= ~HCuPINT_ISTL1;
1075 }
1076
1077 if (irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) {
1078 WARN_ON((irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) ==
1079 (HCuPINT_ISTL0 | HCuPINT_ISTL1));
1080 finish_iso_transfers(isp1362_hcd,
1081 &isp1362_hcd->istl_queue[isp1362_hcd->istl_flip]);
1082 start_iso_transfers(isp1362_hcd);
1083 isp1362_hcd->istl_flip = 1 - isp1362_hcd->istl_flip;
1084 }
1085
1086 if (irqstat & HCuPINT_INTL) {
1087 u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE);
1088 u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCINTLSKIP);
1089 isp1362_hcd->irq_stat[ISP1362_INT_INTL]++;
1090
1091 DBG(2, "%s: INTL\n", __func__);
1092
1093 svc_mask &= ~HCuPINT_INTL;
1094
1095 isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, skip_map | done_map);
1096 if (~(done_map | skip_map) == 0)
1097
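/* all PTDs are done or skipped; the INTL buffer can be deactivated */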
1098 isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
1099
1100 handled = 1;
1101 WARN_ON(!done_map);
1102 if (done_map) {
1103 DBG(3, "%s: INTL done_map %08x\n", __func__, done_map);
1104 finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue);
1105 start_intl_transfers(isp1362_hcd);
1106 }
1107 }
1108
1109 if (irqstat & HCuPINT_ATL) {
1110 u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE);
1111 u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCATLSKIP);
1112 isp1362_hcd->irq_stat[ISP1362_INT_ATL]++;
1113
1114 DBG(2, "%s: ATL\n", __func__);
1115
1116 svc_mask &= ~HCuPINT_ATL;
1117
1118 isp1362_write_reg32(isp1362_hcd, HCATLSKIP, skip_map | done_map);
1119 if (~(done_map | skip_map) == 0)
1120 isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
1121 if (done_map) {
1122 DBG(3, "%s: ATL done_map %08x\n", __func__, done_map);
1123 finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue);
1124 start_atl_transfers(isp1362_hcd);
1125 }
1126 handled = 1;
1127 }
1128
1129 if (irqstat & HCuPINT_OPR) {
1130 u32 intstat = isp1362_read_reg32(isp1362_hcd, HCINTSTAT);
1131 isp1362_hcd->irq_stat[ISP1362_INT_OPR]++;
1132
1133 svc_mask &= ~HCuPINT_OPR;
1134 DBG(2, "%s: OPR %08x:%08x\n", __func__, intstat, isp1362_hcd->intenb);
1135 intstat &= isp1362_hcd->intenb;
1136 if (intstat & OHCI_INTR_UE) {
1137 pr_err("Unrecoverable error\n");
1138
1139 }
1140 if (intstat & OHCI_INTR_RHSC) {
1141 isp1362_hcd->rhstatus = isp1362_read_reg32(isp1362_hcd, HCRHSTATUS);
1142 isp1362_hcd->rhport[0] = isp1362_read_reg32(isp1362_hcd, HCRHPORT1);
1143 isp1362_hcd->rhport[1] = isp1362_read_reg32(isp1362_hcd, HCRHPORT2);
1144 }
1145 if (intstat & OHCI_INTR_RD) {
1146 pr_info("%s: RESUME DETECTED\n", __func__);
1147 isp1362_show_reg(isp1362_hcd, HCCONTROL);
1148 usb_hcd_resume_root_hub(hcd);
1149 }
1150 isp1362_write_reg32(isp1362_hcd, HCINTSTAT, intstat);
1151 irqstat &= ~HCuPINT_OPR;
1152 handled = 1;
1153 }
1154
1155 if (irqstat & HCuPINT_SUSP) {
1156 isp1362_hcd->irq_stat[ISP1362_INT_SUSP]++;
1157 handled = 1;
1158 svc_mask &= ~HCuPINT_SUSP;
1159
1160 pr_info("%s: SUSPEND IRQ\n", __func__);
1161 }
1162
1163 if (irqstat & HCuPINT_CLKRDY) {
1164 isp1362_hcd->irq_stat[ISP1362_INT_CLKRDY]++;
1165 handled = 1;
1166 isp1362_hcd->irqenb &= ~HCuPINT_CLKRDY;
1167 svc_mask &= ~HCuPINT_CLKRDY;
1168 pr_info("%s: CLKRDY IRQ\n", __func__);
1169 }
1170
1171 if (svc_mask)
1172 pr_err("%s: Unserviced interrupt(s) %04x\n", __func__, svc_mask);
1173
1174 isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
1175 isp1362_hcd->irq_active--;
1176 spin_unlock(&isp1362_hcd->lock);
1177
1178 return IRQ_RETVAL(handled);
1179 }
1180
1181
1182
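/*
 * Periodic scheduling: pick the branch of the periodic tree with the
 * lowest load that can still take the new endpoint without exceeding
 * MAX_PERIODIC_LOAD in any affected frame.
 */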
1183 #define MAX_PERIODIC_LOAD 900
1184 static int balance(struct isp1362_hcd *isp1362_hcd, u16 interval, u16 load)
1185 {
1186 int i, branch = -ENOSPC;
1187
1188
1189
1190
1191 for (i = 0; i < interval; i++) {
1192 if (branch < 0 || isp1362_hcd->load[branch] > isp1362_hcd->load[i]) {
1193 int j;
1194
1195 for (j = i; j < PERIODIC_SIZE; j += interval) {
1196 if ((isp1362_hcd->load[j] + load) > MAX_PERIODIC_LOAD) {
1197 pr_err("%s: new load %d load[%02x] %d max %d\n", __func__,
1198 load, j, isp1362_hcd->load[j], MAX_PERIODIC_LOAD);
1199 break;
1200 }
1201 }
1202 if (j < PERIODIC_SIZE)
1203 continue;
1204 branch = i;
1205 }
1206 }
1207 return branch;
1208 }
1209
1210
1211
1212
1213
1214
1215
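/* HCD entry point: queue an URB, setting up and scheduling the endpoint on first use. */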
1216 static int isp1362_urb_enqueue(struct usb_hcd *hcd,
1217 struct urb *urb,
1218 gfp_t mem_flags)
1219 {
1220 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1221 struct usb_device *udev = urb->dev;
1222 unsigned int pipe = urb->pipe;
1223 int is_out = !usb_pipein(pipe);
1224 int type = usb_pipetype(pipe);
1225 int epnum = usb_pipeendpoint(pipe);
1226 struct usb_host_endpoint *hep = urb->ep;
1227 struct isp1362_ep *ep = NULL;
1228 unsigned long flags;
1229 int retval = 0;
1230
1231 DBG(3, "%s: urb %p\n", __func__, urb);
1232
1233 if (type == PIPE_ISOCHRONOUS) {
1234 pr_err("Isochronous transfers not supported\n");
1235 return -ENOSPC;
1236 }
1237
1238 URB_DBG("%s: FA %d ep%d%s %s: len %d %s%s\n", __func__,
1239 usb_pipedevice(pipe), epnum,
1240 is_out ? "out" : "in",
1241 usb_pipecontrol(pipe) ? "ctrl" :
1242 usb_pipeint(pipe) ? "int" :
1243 usb_pipebulk(pipe) ? "bulk" :
1244 "iso",
1245 urb->transfer_buffer_length,
1246 (urb->transfer_flags & URB_ZERO_PACKET) ? "ZERO_PACKET " : "",
1247 !(urb->transfer_flags & URB_SHORT_NOT_OK) ?
1248 "short_ok" : "");
1249
1250
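/* allocate endpoint state up front so no allocation happens under the spinlock */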
1251 if (!hep->hcpriv) {
1252 ep = kzalloc(sizeof *ep, mem_flags);
1253 if (!ep)
1254 return -ENOMEM;
1255 }
1256 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1257
1258
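/* refuse new URBs while no root-hub port is enabled or the HC is not running */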
1259 if (!((isp1362_hcd->rhport[0] | isp1362_hcd->rhport[1]) &
1260 USB_PORT_STAT_ENABLE) ||
1261 !HC_IS_RUNNING(hcd->state)) {
1262 kfree(ep);
1263 retval = -ENODEV;
1264 goto fail_not_linked;
1265 }
1266
1267 retval = usb_hcd_link_urb_to_ep(hcd, urb);
1268 if (retval) {
1269 kfree(ep);
1270 goto fail_not_linked;
1271 }
1272
1273 if (hep->hcpriv) {
1274 ep = hep->hcpriv;
1275 } else {
1276 INIT_LIST_HEAD(&ep->schedule);
1277 INIT_LIST_HEAD(&ep->active);
1278 INIT_LIST_HEAD(&ep->remove_list);
1279 ep->udev = usb_get_dev(udev);
1280 ep->hep = hep;
1281 ep->epnum = epnum;
1282 ep->maxpacket = usb_maxpacket(udev, urb->pipe);
1283 ep->ptd_offset = -EINVAL;
1284 ep->ptd_index = -EINVAL;
1285 usb_settoggle(udev, epnum, is_out, 0);
1286
1287 if (type == PIPE_CONTROL)
1288 ep->nextpid = USB_PID_SETUP;
1289 else if (is_out)
1290 ep->nextpid = USB_PID_OUT;
1291 else
1292 ep->nextpid = USB_PID_IN;
1293
1294 switch (type) {
1295 case PIPE_ISOCHRONOUS:
1296 case PIPE_INTERRUPT:
1297 if (urb->interval > PERIODIC_SIZE)
1298 urb->interval = PERIODIC_SIZE;
1299 ep->interval = urb->interval;
1300 ep->branch = PERIODIC_SIZE;
1301 ep->load = usb_calc_bus_time(udev->speed, !is_out,
1302 type == PIPE_ISOCHRONOUS,
1303 usb_maxpacket(udev, pipe)) / 1000;
1304 break;
1305 }
1306 hep->hcpriv = ep;
1307 }
1308 ep->num_req = isp1362_hcd->req_serial++;
1309
1310
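/* put the endpoint on the appropriate schedule if it is not there already */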
1311 switch (type) {
1312 case PIPE_CONTROL:
1313 case PIPE_BULK:
1314 if (list_empty(&ep->schedule)) {
1315 DBG(1, "%s: Adding ep %p req %d to async schedule\n",
1316 __func__, ep, ep->num_req);
1317 list_add_tail(&ep->schedule, &isp1362_hcd->async);
1318 }
1319 break;
1320 case PIPE_ISOCHRONOUS:
1321 case PIPE_INTERRUPT:
1322 urb->interval = ep->interval;
1323
1324
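/* a periodic branch (and its bandwidth) was already allocated by an earlier URB */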
1325 if (ep->branch < PERIODIC_SIZE)
1326 break;
1327
1328 retval = balance(isp1362_hcd, ep->interval, ep->load);
1329 if (retval < 0) {
1330 pr_err("%s: balance returned %d\n", __func__, retval);
1331 goto fail;
1332 }
1333 ep->branch = retval;
1334 retval = 0;
1335 isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
1336 DBG(1, "%s: Current frame %04x branch %02x start_frame %04x(%04x)\n",
1337 __func__, isp1362_hcd->fmindex, ep->branch,
1338 ((isp1362_hcd->fmindex + PERIODIC_SIZE - 1) &
1339 ~(PERIODIC_SIZE - 1)) + ep->branch,
1340 (isp1362_hcd->fmindex & (PERIODIC_SIZE - 1)) + ep->branch);
1341
1342 if (list_empty(&ep->schedule)) {
1343 if (type == PIPE_ISOCHRONOUS) {
1344 u16 frame = isp1362_hcd->fmindex;
1345
1346 frame += max_t(u16, 8, ep->interval);
1347 frame &= ~(ep->interval - 1);
1348 frame |= ep->branch;
1349 if (frame_before(frame, isp1362_hcd->fmindex))
1350 frame += ep->interval;
1351 urb->start_frame = frame;
1352
1353 DBG(1, "%s: Adding ep %p to isoc schedule\n", __func__, ep);
1354 list_add_tail(&ep->schedule, &isp1362_hcd->isoc);
1355 } else {
1356 DBG(1, "%s: Adding ep %p to periodic schedule\n", __func__, ep);
1357 list_add_tail(&ep->schedule, &isp1362_hcd->periodic);
1358 }
1359 } else
1360 DBG(1, "%s: ep %p already scheduled\n", __func__, ep);
1361
1362 DBG(2, "%s: load %d bandwidth %d -> %d\n", __func__,
1363 ep->load / ep->interval, isp1362_hcd->load[ep->branch],
1364 isp1362_hcd->load[ep->branch] + ep->load);
1365 isp1362_hcd->load[ep->branch] += ep->load;
1366 }
1367
1368 urb->hcpriv = hep;
1369 ALIGNSTAT(isp1362_hcd, urb->transfer_buffer);
1370
1371 switch (type) {
1372 case PIPE_CONTROL:
1373 case PIPE_BULK:
1374 start_atl_transfers(isp1362_hcd);
1375 break;
1376 case PIPE_INTERRUPT:
1377 start_intl_transfers(isp1362_hcd);
1378 break;
1379 case PIPE_ISOCHRONOUS:
1380 start_iso_transfers(isp1362_hcd);
1381 break;
1382 default:
1383 BUG();
1384 }
1385 fail:
1386 if (retval)
1387 usb_hcd_unlink_urb_from_ep(hcd, urb);
1388
1389
1390 fail_not_linked:
1391 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1392 if (retval)
1393 DBG(0, "%s: urb %p failed with %d\n", __func__, urb, retval);
1394 return retval;
1395 }
1396
1397 static int isp1362_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1398 {
1399 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1400 struct usb_host_endpoint *hep;
1401 unsigned long flags;
1402 struct isp1362_ep *ep;
1403 int retval = 0;
1404
1405 DBG(3, "%s: urb %p\n", __func__, urb);
1406
1407 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1408 retval = usb_hcd_check_unlink_urb(hcd, urb, status);
1409 if (retval)
1410 goto done;
1411
1412 hep = urb->hcpriv;
1413
1414 if (!hep) {
1415 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1416 return -EIDRM;
1417 }
1418
1419 ep = hep->hcpriv;
1420 if (ep) {
1421
1422 if (ep->hep->urb_list.next == &urb->urb_list) {
1423 if (!list_empty(&ep->active)) {
1424 DBG(1, "%s: urb %p ep %p req %d active PTD[%d] $%04x\n", __func__,
1425 urb, ep, ep->num_req, ep->ptd_index, ep->ptd_offset);
1426
1427 remove_ptd(isp1362_hcd, ep);
1428 urb = NULL;
1429 }
1430 }
1431 if (urb) {
1432 DBG(1, "%s: Finishing ep %p req %d\n", __func__, ep,
1433 ep->num_req);
1434 finish_request(isp1362_hcd, ep, urb, status);
1435 } else
1436 DBG(1, "%s: urb %p active; wait4irq\n", __func__, urb);
1437 } else {
1438 pr_warn("%s: No EP in URB %p\n", __func__, urb);
1439 retval = -EINVAL;
1440 }
1441 done:
1442 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1443
1444 DBG(3, "%s: exit\n", __func__);
1445
1446 return retval;
1447 }
1448
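/* Wait for any active PTD to be removed by the interrupt handler, then free the endpoint state. */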
1449 static void isp1362_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
1450 {
1451 struct isp1362_ep *ep = hep->hcpriv;
1452 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1453 unsigned long flags;
1454
1455 DBG(1, "%s: ep %p\n", __func__, ep);
1456 if (!ep)
1457 return;
1458 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1459 if (!list_empty(&hep->urb_list)) {
1460 if (!list_empty(&ep->active) && list_empty(&ep->remove_list)) {
1461 DBG(1, "%s: Removing ep %p req %d PTD[%d] $%04x\n", __func__,
1462 ep, ep->num_req, ep->ptd_index, ep->ptd_offset);
1463 remove_ptd(isp1362_hcd, ep);
1464 pr_info("%s: Waiting for Interrupt to clean up\n", __func__);
1465 }
1466 }
1467 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1468
1469 while (!list_empty(&ep->active))
1470 msleep(1);
1471
1472 DBG(1, "%s: Freeing EP %p\n", __func__, ep);
1473
1474 usb_put_dev(ep->udev);
1475 kfree(ep);
1476 hep->hcpriv = NULL;
1477 }
1478
1479 static int isp1362_get_frame(struct usb_hcd *hcd)
1480 {
1481 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1482 u32 fmnum;
1483 unsigned long flags;
1484
1485 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1486 fmnum = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
1487 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1488
1489 return (int)fmnum;
1490 }
1491
1492
1493
1494
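/* Root-hub emulation: status polling, hub descriptor and port control. */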
1495 static int isp1362_hub_status_data(struct usb_hcd *hcd, char *buf)
1496 {
1497 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1498 int ports, i, changed = 0;
1499 unsigned long flags;
1500
1501 if (!HC_IS_RUNNING(hcd->state))
1502 return -ESHUTDOWN;
1503
1504
1505
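/* if a root-hub status poll is already scheduled, report no change now */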
1506 if (timer_pending(&hcd->rh_timer))
1507 return 0;
1508
1509 ports = isp1362_hcd->rhdesca & RH_A_NDP;
1510 BUG_ON(ports > 2);
1511
1512 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1513
1514 if (isp1362_hcd->rhstatus & (RH_HS_LPSC | RH_HS_OCIC))
1515 buf[0] = changed = 1;
1516 else
1517 buf[0] = 0;
1518
1519 for (i = 0; i < ports; i++) {
1520 u32 status = isp1362_hcd->rhport[i];
1521
1522 if (status & (RH_PS_CSC | RH_PS_PESC | RH_PS_PSSC |
1523 RH_PS_OCIC | RH_PS_PRSC)) {
1524 changed = 1;
1525 buf[0] |= 1 << (i + 1);
1526 continue;
1527 }
1528
1529 if (!(status & RH_PS_CCS))
1530 continue;
1531 }
1532 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1533 return changed;
1534 }
1535
1536 static void isp1362_hub_descriptor(struct isp1362_hcd *isp1362_hcd,
1537 struct usb_hub_descriptor *desc)
1538 {
1539 u32 reg = isp1362_hcd->rhdesca;
1540
1541 DBG(3, "%s: enter\n", __func__);
1542
1543 desc->bDescriptorType = USB_DT_HUB;
1544 desc->bDescLength = 9;
1545 desc->bHubContrCurrent = 0;
1546 desc->bNbrPorts = reg & 0x3;
1547
1548 desc->wHubCharacteristics = cpu_to_le16((reg >> 8) &
1549 (HUB_CHAR_LPSM |
1550 HUB_CHAR_COMPOUND |
1551 HUB_CHAR_OCPM));
1552 DBG(0, "%s: hubcharacteristics = %02x\n", __func__,
1553 desc->wHubCharacteristics);
1554 desc->bPwrOn2PwrGood = (reg >> 24) & 0xff;
1555
1556 desc->u.hs.DeviceRemovable[0] = desc->bNbrPorts == 1 ? 1 << 1 : 3 << 1;
1557 desc->u.hs.DeviceRemovable[1] = ~0;
1558
1559 DBG(3, "%s: exit\n", __func__);
1560 }
1561
1562
1563 static int isp1362_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
1564 u16 wIndex, char *buf, u16 wLength)
1565 {
1566 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1567 int retval = 0;
1568 unsigned long flags;
1569 unsigned long t1;
1570 int ports = isp1362_hcd->rhdesca & RH_A_NDP;
1571 u32 tmp = 0;
1572
1573 switch (typeReq) {
1574 case ClearHubFeature:
1575 DBG(0, "ClearHubFeature: ");
1576 switch (wValue) {
1577 case C_HUB_OVER_CURRENT:
1578 DBG(0, "C_HUB_OVER_CURRENT\n");
1579 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1580 isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_OCIC);
1581 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1582 break;
1583 case C_HUB_LOCAL_POWER:
1584 DBG(0, "C_HUB_LOCAL_POWER\n");
1585 break;
1586 default:
1587 goto error;
1588 }
1589 break;
1590 case SetHubFeature:
1591 DBG(0, "SetHubFeature: ");
1592 switch (wValue) {
1593 case C_HUB_OVER_CURRENT:
1594 case C_HUB_LOCAL_POWER:
1595 DBG(0, "C_HUB_OVER_CURRENT or C_HUB_LOCAL_POWER\n");
1596 break;
1597 default:
1598 goto error;
1599 }
1600 break;
1601 case GetHubDescriptor:
1602 DBG(0, "GetHubDescriptor\n");
1603 isp1362_hub_descriptor(isp1362_hcd, (struct usb_hub_descriptor *)buf);
1604 break;
1605 case GetHubStatus:
1606 DBG(0, "GetHubStatus\n");
1607 put_unaligned(cpu_to_le32(0), (__le32 *) buf);
1608 break;
1609 case GetPortStatus:
1610 #ifndef VERBOSE
1611 DBG(0, "GetPortStatus\n");
1612 #endif
1613 if (!wIndex || wIndex > ports)
1614 goto error;
1615 tmp = isp1362_hcd->rhport[--wIndex];
1616 put_unaligned(cpu_to_le32(tmp), (__le32 *) buf);
1617 break;
1618 case ClearPortFeature:
1619 DBG(0, "ClearPortFeature: ");
1620 if (!wIndex || wIndex > ports)
1621 goto error;
1622 wIndex--;
1623
1624 switch (wValue) {
1625 case USB_PORT_FEAT_ENABLE:
1626 DBG(0, "USB_PORT_FEAT_ENABLE\n");
1627 tmp = RH_PS_CCS;
1628 break;
1629 case USB_PORT_FEAT_C_ENABLE:
1630 DBG(0, "USB_PORT_FEAT_C_ENABLE\n");
1631 tmp = RH_PS_PESC;
1632 break;
1633 case USB_PORT_FEAT_SUSPEND:
1634 DBG(0, "USB_PORT_FEAT_SUSPEND\n");
1635 tmp = RH_PS_POCI;
1636 break;
1637 case USB_PORT_FEAT_C_SUSPEND:
1638 DBG(0, "USB_PORT_FEAT_C_SUSPEND\n");
1639 tmp = RH_PS_PSSC;
1640 break;
1641 case USB_PORT_FEAT_POWER:
1642 DBG(0, "USB_PORT_FEAT_POWER\n");
1643 tmp = RH_PS_LSDA;
1644
1645 break;
1646 case USB_PORT_FEAT_C_CONNECTION:
1647 DBG(0, "USB_PORT_FEAT_C_CONNECTION\n");
1648 tmp = RH_PS_CSC;
1649 break;
1650 case USB_PORT_FEAT_C_OVER_CURRENT:
1651 DBG(0, "USB_PORT_FEAT_C_OVER_CURRENT\n");
1652 tmp = RH_PS_OCIC;
1653 break;
1654 case USB_PORT_FEAT_C_RESET:
1655 DBG(0, "USB_PORT_FEAT_C_RESET\n");
1656 tmp = RH_PS_PRSC;
1657 break;
1658 default:
1659 goto error;
1660 }
1661
1662 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1663 isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, tmp);
1664 isp1362_hcd->rhport[wIndex] =
1665 isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
1666 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1667 break;
1668 case SetPortFeature:
1669 DBG(0, "SetPortFeature: ");
1670 if (!wIndex || wIndex > ports)
1671 goto error;
1672 wIndex--;
1673 switch (wValue) {
1674 case USB_PORT_FEAT_SUSPEND:
1675 DBG(0, "USB_PORT_FEAT_SUSPEND\n");
1676 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1677 isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PSS);
1678 isp1362_hcd->rhport[wIndex] =
1679 isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
1680 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1681 break;
1682 case USB_PORT_FEAT_POWER:
1683 DBG(0, "USB_PORT_FEAT_POWER\n");
1684 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1685 isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PPS);
1686 isp1362_hcd->rhport[wIndex] =
1687 isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
1688 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1689 break;
1690 case USB_PORT_FEAT_RESET:
1691 DBG(0, "USB_PORT_FEAT_RESET\n");
1692 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1693
1694 t1 = jiffies + msecs_to_jiffies(USB_RESET_WIDTH);
1695 while (time_before(jiffies, t1)) {
1696
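/* wait for any reset pulse already in progress to finish */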
1697 for (;;) {
1698 tmp = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
1699 if (!(tmp & RH_PS_PRS))
1700 break;
1701 udelay(500);
1702 }
1703 if (!(tmp & RH_PS_CCS))
1704 break;
1705
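/* port still connected: trigger the next reset pulse */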
1706 isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, (RH_PS_PRS));
1707
1708 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1709 msleep(10);
1710 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1711 }
1712
1713 isp1362_hcd->rhport[wIndex] = isp1362_read_reg32(isp1362_hcd,
1714 HCRHPORT1 + wIndex);
1715 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1716 break;
1717 default:
1718 goto error;
1719 }
1720 break;
1721
1722 default:
1723 error:
1724
1725 DBG(0, "PROTOCOL STALL\n");
1726 retval = -EPIPE;
1727 }
1728
1729 return retval;
1730 }
1731
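/* Root-hub bus suspend/resume support (CONFIG_PM only). */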
1732 #ifdef CONFIG_PM
1733 static int isp1362_bus_suspend(struct usb_hcd *hcd)
1734 {
1735 int status = 0;
1736 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1737 unsigned long flags;
1738
1739 if (time_before(jiffies, isp1362_hcd->next_statechange))
1740 msleep(5);
1741
1742 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1743
1744 isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
1745 switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) {
1746 case OHCI_USB_RESUME:
1747 DBG(0, "%s: resume/suspend?\n", __func__);
1748 isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS;
1749 isp1362_hcd->hc_control |= OHCI_USB_RESET;
1750 isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
1751 fallthrough;
1752 case OHCI_USB_RESET:
1753 status = -EBUSY;
1754 pr_warn("%s: needs reinit!\n", __func__);
1755 goto done;
1756 case OHCI_USB_SUSPEND:
1757 pr_warn("%s: already suspended?\n", __func__);
1758 goto done;
1759 }
1760 DBG(0, "%s: suspend root hub\n", __func__);
1761
1762
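/* stop all transfer processing before putting the root hub to sleep */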
1763 hcd->state = HC_STATE_QUIESCING;
1764 if (!list_empty(&isp1362_hcd->atl_queue.active) ||
1765 !list_empty(&isp1362_hcd->intl_queue.active) ||
1766 !list_empty(&isp1362_hcd->istl_queue[0].active) ||
1767 !list_empty(&isp1362_hcd->istl_queue[1].active)) {
1768 int limit;
1769
1770 isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0);
1771 isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0);
1772 isp1362_write_reg16(isp1362_hcd, HCBUFSTAT, 0);
1773 isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
1774 isp1362_write_reg32(isp1362_hcd, HCINTSTAT, OHCI_INTR_SF);
1775
1776 DBG(0, "%s: stopping schedules ...\n", __func__);
1777 limit = 2000;
1778 while (limit > 0) {
1779 udelay(250);
1780 limit -= 250;
1781 if (isp1362_read_reg32(isp1362_hcd, HCINTSTAT) & OHCI_INTR_SF)
1782 break;
1783 }
1784 mdelay(7);
1785 if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ATL) {
1786 u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE);
1787 finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue);
1788 }
1789 if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_INTL) {
1790 u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE);
1791 finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue);
1792 }
1793 if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL0)
1794 finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[0]);
1795 if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL1)
1796 finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[1]);
1797 }
1798 DBG(0, "%s: HCINTSTAT: %08x\n", __func__,
1799 isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
1800 isp1362_write_reg32(isp1362_hcd, HCINTSTAT,
1801 isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
1802
1803
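/* now request the USB suspend state from the controller */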
1804 isp1362_hcd->hc_control = OHCI_USB_SUSPEND;
1805 isp1362_show_reg(isp1362_hcd, HCCONTROL);
1806 isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
1807 isp1362_show_reg(isp1362_hcd, HCCONTROL);
1808
1809 #if 1
1810 isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
1811 if ((isp1362_hcd->hc_control & OHCI_CTRL_HCFS) != OHCI_USB_SUSPEND) {
1812 pr_err("%s: controller won't suspend %08x\n", __func__,
1813 isp1362_hcd->hc_control);
1814 status = -EBUSY;
1815 } else
1816 #endif
1817 {
1818
1819 isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(5);
1820 }
1821 done:
1822 if (status == 0) {
1823 hcd->state = HC_STATE_SUSPENDED;
1824 DBG(0, "%s: HCD suspended: %08x\n", __func__,
1825 isp1362_read_reg32(isp1362_hcd, HCCONTROL));
1826 }
1827 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1828 return status;
1829 }
1830
1831 static int isp1362_bus_resume(struct usb_hcd *hcd)
1832 {
1833 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1834 u32 port;
1835 unsigned long flags;
1836 int status = -EINPROGRESS;
1837
1838 if (time_before(jiffies, isp1362_hcd->next_statechange))
1839 msleep(5);
1840
1841 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1842 isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
1843 pr_info("%s: HCCONTROL: %08x\n", __func__, isp1362_hcd->hc_control);
1844 if (hcd->state == HC_STATE_RESUMING) {
1845 pr_warn("%s: duplicate resume\n", __func__);
1846 status = 0;
1847 } else
1848 switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) {
1849 case OHCI_USB_SUSPEND:
1850 DBG(0, "%s: resume root hub\n", __func__);
1851 isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS;
1852 isp1362_hcd->hc_control |= OHCI_USB_RESUME;
1853 isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
1854 break;
1855 case OHCI_USB_RESUME:
1856
1857 DBG(0, "%s: remote wakeup\n", __func__);
1858 break;
1859 case OHCI_USB_OPER:
1860 DBG(0, "%s: odd resume\n", __func__);
1861 status = 0;
1862 hcd->self.root_hub->dev.power.power_state = PMSG_ON;
1863 break;
1864 default:
1865 DBG(0, "%s: root hub hardware reset\n", __func__);
1866 status = -EBUSY;
1867 }
1868 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1869 if (status == -EBUSY) {
1870 DBG(0, "%s: Restarting HC\n", __func__);
1871 isp1362_hc_stop(hcd);
1872 return isp1362_hc_start(hcd);
1873 }
1874 if (status != -EINPROGRESS)
1875 return status;
1876 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1877 port = isp1362_read_reg32(isp1362_hcd, HCRHDESCA) & RH_A_NDP;
1878 while (port--) {
1879 u32 stat = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + port);
1880
1881
1882 if (!(stat & RH_PS_PSS)) {
1883 DBG(0, "%s: Not Resuming RH port %d\n", __func__, port);
1884 continue;
1885 }
1886 DBG(0, "%s: Resuming RH port %d\n", __func__, port);
1887 isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + port, RH_PS_POCI);
1888 }
1889 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1890
1891
1892 hcd->state = HC_STATE_RESUMING;
1893 mdelay(20 + 15);
1894
1895 isp1362_hcd->hc_control = OHCI_USB_OPER;
1896 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1897 isp1362_show_reg(isp1362_hcd, HCCONTROL);
1898 isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
1899 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1900
1901 msleep(10);
1902
1903
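     /* Throttle further root-hub state changes for roughly 250 ms */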
1904 isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(250);
1905
1906 hcd->self.root_hub->dev.power.power_state = PMSG_ON;
1907 hcd->state = HC_STATE_RUNNING;
1908 return 0;
1909 }
1910 #else
1911 #define isp1362_bus_suspend NULL
1912 #define isp1362_bus_resume NULL
1913 #endif
1914
1915
1916
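     /*
      * debugfs support: isp1362_show() dumps the interrupt masks, the OHCI
      * and ISP1362-specific registers and the currently scheduled endpoints
      * to <debugfs>/usb/isp1362.
      */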
1917 static void dump_irq(struct seq_file *s, char *label, u16 mask)
1918 {
1919 seq_printf(s, "%-15s %04x%s%s%s%s%s%s\n", label, mask,
1920 mask & HCuPINT_CLKRDY ? " clkrdy" : "",
1921 mask & HCuPINT_SUSP ? " susp" : "",
1922 mask & HCuPINT_OPR ? " opr" : "",
1923 mask & HCuPINT_EOT ? " eot" : "",
1924 mask & HCuPINT_ATL ? " atl" : "",
1925 mask & HCuPINT_SOF ? " sof" : "");
1926 }
1927
1928 static void dump_int(struct seq_file *s, char *label, u32 mask)
1929 {
1930 seq_printf(s, "%-15s %08x%s%s%s%s%s%s%s\n", label, mask,
1931 mask & OHCI_INTR_MIE ? " MIE" : "",
1932 mask & OHCI_INTR_RHSC ? " rhsc" : "",
1933 mask & OHCI_INTR_FNO ? " fno" : "",
1934 mask & OHCI_INTR_UE ? " ue" : "",
1935 mask & OHCI_INTR_RD ? " rd" : "",
1936 mask & OHCI_INTR_SF ? " sof" : "",
1937 mask & OHCI_INTR_SO ? " so" : "");
1938 }
1939
1940 static void dump_ctrl(struct seq_file *s, char *label, u32 mask)
1941 {
1942 seq_printf(s, "%-15s %08x%s%s%s\n", label, mask,
1943 mask & OHCI_CTRL_RWC ? " rwc" : "",
1944 mask & OHCI_CTRL_RWE ? " rwe" : "",
1945 ({
1946 char *hcfs;
1947 switch (mask & OHCI_CTRL_HCFS) {
1948 case OHCI_USB_OPER:
1949 hcfs = " oper";
1950 break;
1951 case OHCI_USB_RESET:
1952 hcfs = " reset";
1953 break;
1954 case OHCI_USB_RESUME:
1955 hcfs = " resume";
1956 break;
1957 case OHCI_USB_SUSPEND:
1958 hcfs = " suspend";
1959 break;
1960 default:
1961 hcfs = " ?";
1962 }
1963 hcfs;
1964 }));
1965 }
1966
1967 static void dump_regs(struct seq_file *s, struct isp1362_hcd *isp1362_hcd)
1968 {
1969 seq_printf(s, "HCREVISION [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCREVISION),
1970 isp1362_read_reg32(isp1362_hcd, HCREVISION));
1971 seq_printf(s, "HCCONTROL [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCONTROL),
1972 isp1362_read_reg32(isp1362_hcd, HCCONTROL));
1973 seq_printf(s, "HCCMDSTAT [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCMDSTAT),
1974 isp1362_read_reg32(isp1362_hcd, HCCMDSTAT));
1975 seq_printf(s, "HCINTSTAT [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTSTAT),
1976 isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
1977 seq_printf(s, "HCINTENB [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTENB),
1978 isp1362_read_reg32(isp1362_hcd, HCINTENB));
1979 seq_printf(s, "HCFMINTVL [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMINTVL),
1980 isp1362_read_reg32(isp1362_hcd, HCFMINTVL));
1981 seq_printf(s, "HCFMREM [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMREM),
1982 isp1362_read_reg32(isp1362_hcd, HCFMREM));
1983 seq_printf(s, "HCFMNUM [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMNUM),
1984 isp1362_read_reg32(isp1362_hcd, HCFMNUM));
1985 seq_printf(s, "HCLSTHRESH [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCLSTHRESH),
1986 isp1362_read_reg32(isp1362_hcd, HCLSTHRESH));
1987 seq_printf(s, "HCRHDESCA [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCA),
1988 isp1362_read_reg32(isp1362_hcd, HCRHDESCA));
1989 seq_printf(s, "HCRHDESCB [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCB),
1990 isp1362_read_reg32(isp1362_hcd, HCRHDESCB));
1991 seq_printf(s, "HCRHSTATUS [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHSTATUS),
1992 isp1362_read_reg32(isp1362_hcd, HCRHSTATUS));
1993 seq_printf(s, "HCRHPORT1 [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT1),
1994 isp1362_read_reg32(isp1362_hcd, HCRHPORT1));
1995 seq_printf(s, "HCRHPORT2 [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT2),
1996 isp1362_read_reg32(isp1362_hcd, HCRHPORT2));
1997 seq_printf(s, "\n");
1998 seq_printf(s, "HCHWCFG [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCHWCFG),
1999 isp1362_read_reg16(isp1362_hcd, HCHWCFG));
2000 seq_printf(s, "HCDMACFG [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCDMACFG),
2001 isp1362_read_reg16(isp1362_hcd, HCDMACFG));
2002 seq_printf(s, "HCXFERCTR [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCXFERCTR),
2003 isp1362_read_reg16(isp1362_hcd, HCXFERCTR));
2004 seq_printf(s, "HCuPINT [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINT),
2005 isp1362_read_reg16(isp1362_hcd, HCuPINT));
2006 seq_printf(s, "HCuPINTENB [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINTENB),
2007 isp1362_read_reg16(isp1362_hcd, HCuPINTENB));
2008 seq_printf(s, "HCCHIPID [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCCHIPID),
2009 isp1362_read_reg16(isp1362_hcd, HCCHIPID));
2010 seq_printf(s, "HCSCRATCH [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCSCRATCH),
2011 isp1362_read_reg16(isp1362_hcd, HCSCRATCH));
2012 seq_printf(s, "HCBUFSTAT [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCBUFSTAT),
2013 isp1362_read_reg16(isp1362_hcd, HCBUFSTAT));
2014 seq_printf(s, "HCDIRADDR [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCDIRADDR),
2015 isp1362_read_reg32(isp1362_hcd, HCDIRADDR));
2016 #if 0
2017 seq_printf(s, "HCDIRDATA [%02x] %04x\n", ISP1362_REG_NO(HCDIRDATA),
2018 isp1362_read_reg16(isp1362_hcd, HCDIRDATA));
2019 #endif
2020 seq_printf(s, "HCISTLBUFSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLBUFSZ),
2021 isp1362_read_reg16(isp1362_hcd, HCISTLBUFSZ));
2022 seq_printf(s, "HCISTLRATE [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLRATE),
2023 isp1362_read_reg16(isp1362_hcd, HCISTLRATE));
2024 seq_printf(s, "\n");
2025 seq_printf(s, "HCINTLBUFSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBUFSZ),
2026 isp1362_read_reg16(isp1362_hcd, HCINTLBUFSZ));
2027 seq_printf(s, "HCINTLBLKSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBLKSZ),
2028 isp1362_read_reg16(isp1362_hcd, HCINTLBLKSZ));
2029 seq_printf(s, "HCINTLDONE [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLDONE),
2030 isp1362_read_reg32(isp1362_hcd, HCINTLDONE));
2031 seq_printf(s, "HCINTLSKIP [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLSKIP),
2032 isp1362_read_reg32(isp1362_hcd, HCINTLSKIP));
2033 seq_printf(s, "HCINTLLAST [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLLAST),
2034 isp1362_read_reg32(isp1362_hcd, HCINTLLAST));
2035 seq_printf(s, "HCINTLCURR [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLCURR),
2036 isp1362_read_reg16(isp1362_hcd, HCINTLCURR));
2037 seq_printf(s, "\n");
2038 seq_printf(s, "HCATLBUFSZ [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBUFSZ),
2039 isp1362_read_reg16(isp1362_hcd, HCATLBUFSZ));
2040 seq_printf(s, "HCATLBLKSZ [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBLKSZ),
2041 isp1362_read_reg16(isp1362_hcd, HCATLBLKSZ));
2042 #if 0
2043 seq_printf(s, "HCATLDONE [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDONE),
2044 isp1362_read_reg32(isp1362_hcd, HCATLDONE));
2045 #endif
2046 seq_printf(s, "HCATLSKIP [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLSKIP),
2047 isp1362_read_reg32(isp1362_hcd, HCATLSKIP));
2048 seq_printf(s, "HCATLLAST [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLLAST),
2049 isp1362_read_reg32(isp1362_hcd, HCATLLAST));
2050 seq_printf(s, "HCATLCURR [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLCURR),
2051 isp1362_read_reg16(isp1362_hcd, HCATLCURR));
2052 seq_printf(s, "\n");
2053 seq_printf(s, "HCATLDTC [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTC),
2054 isp1362_read_reg16(isp1362_hcd, HCATLDTC));
2055 seq_printf(s, "HCATLDTCTO [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTCTO),
2056 isp1362_read_reg16(isp1362_hcd, HCATLDTCTO));
2057 }
2058
2059 static int isp1362_show(struct seq_file *s, void *unused)
2060 {
2061 struct isp1362_hcd *isp1362_hcd = s->private;
2062 struct isp1362_ep *ep;
2063 int i;
2064
2065 seq_printf(s, "%s\n%s version %s\n",
2066 isp1362_hcd_to_hcd(isp1362_hcd)->product_desc, hcd_name, DRIVER_VERSION);
2067
2068
2069
2070
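     /* driver-collected statistics: buffer access alignment counters and maximum PTD FIFO usage */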
2071 seq_printf(s, "alignment: 16b/%ld 8b/%ld 4b/%ld 2b/%ld 1b/%ld\n",
2072 isp1362_hcd->stat16, isp1362_hcd->stat8, isp1362_hcd->stat4,
2073 isp1362_hcd->stat2, isp1362_hcd->stat1);
2074 seq_printf(s, "max # ptds in ATL fifo: %d\n", isp1362_hcd->atl_queue.stat_maxptds);
2075 seq_printf(s, "max # ptds in INTL fifo: %d\n", isp1362_hcd->intl_queue.stat_maxptds);
2076 seq_printf(s, "max # ptds in ISTL fifo: %d\n",
2077 max(isp1362_hcd->istl_queue[0].stat_maxptds,
2078 isp1362_hcd->istl_queue[1].stat_maxptds));
2079
2080
2081 spin_lock_irq(&isp1362_hcd->lock);
2082
2083 dump_irq(s, "hc_irq_enable", isp1362_read_reg16(isp1362_hcd, HCuPINTENB));
2084 dump_irq(s, "hc_irq_status", isp1362_read_reg16(isp1362_hcd, HCuPINT));
2085 dump_int(s, "ohci_int_enable", isp1362_read_reg32(isp1362_hcd, HCINTENB));
2086 dump_int(s, "ohci_int_status", isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
2087 dump_ctrl(s, "ohci_control", isp1362_read_reg32(isp1362_hcd, HCCONTROL));
2088
2089 for (i = 0; i < NUM_ISP1362_IRQS; i++)
2090 if (isp1362_hcd->irq_stat[i])
2091 seq_printf(s, "%-15s: %d\n",
2092 ISP1362_INT_NAME(i), isp1362_hcd->irq_stat[i]);
2093
2094 dump_regs(s, isp1362_hcd);
2095 list_for_each_entry(ep, &isp1362_hcd->async, schedule) {
2096 struct urb *urb;
2097
2098 seq_printf(s, "%p, ep%d%s, maxpacket %d:\n", ep, ep->epnum,
2099 ({
2100 char *pid_str; /* avoid shadowing the seq_file argument 's' */
2101 switch (ep->nextpid) {
2102 case USB_PID_IN:
2103 pid_str = "in";
2104 break;
2105 case USB_PID_OUT:
2106 pid_str = "out";
2107 break;
2108 case USB_PID_SETUP:
2109 pid_str = "setup";
2110 break;
2111 case USB_PID_ACK:
2112 pid_str = "status";
2113 break;
2114 default:
2115 pid_str = "?";
2116 break;
2117 }
2118 pid_str; }), ep->maxpacket);
2119 list_for_each_entry(urb, &ep->hep->urb_list, urb_list) {
2120 seq_printf(s, " urb%p, %d/%d\n", urb,
2121 urb->actual_length,
2122 urb->transfer_buffer_length);
2123 }
2124 }
2125 if (!list_empty(&isp1362_hcd->async))
2126 seq_printf(s, "\n");
2127 dump_ptd_queue(&isp1362_hcd->atl_queue);
2128
2129 seq_printf(s, "periodic size= %d\n", PERIODIC_SIZE);
2130
2131 list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) {
2132 seq_printf(s, "branch:%2d load:%3d PTD[%d] $%04x:\n", ep->branch,
2133 isp1362_hcd->load[ep->branch], ep->ptd_index, ep->ptd_offset);
2134
2135 seq_printf(s, " %d/%p (%sdev%d ep%d%s max %d)\n",
2136 ep->interval, ep,
2137 (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ",
2138 ep->udev->devnum, ep->epnum,
2139 (ep->epnum == 0) ? "" :
2140 ((ep->nextpid == USB_PID_IN) ?
2141 "in" : "out"), ep->maxpacket);
2142 }
2143 dump_ptd_queue(&isp1362_hcd->intl_queue);
2144
2145 seq_printf(s, "ISO:\n");
2146
2147 list_for_each_entry(ep, &isp1362_hcd->isoc, schedule) {
2148 seq_printf(s, " %d/%p (%sdev%d ep%d%s max %d)\n",
2149 ep->interval, ep,
2150 (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ",
2151 ep->udev->devnum, ep->epnum,
2152 (ep->epnum == 0) ? "" :
2153 ((ep->nextpid == USB_PID_IN) ?
2154 "in" : "out"), ep->maxpacket);
2155 }
2156
2157 spin_unlock_irq(&isp1362_hcd->lock);
2158 seq_printf(s, "\n");
2159
2160 return 0;
2161 }
2162 DEFINE_SHOW_ATTRIBUTE(isp1362);
2163
2164
2165 static void create_debug_file(struct isp1362_hcd *isp1362_hcd)
2166 {
2167 debugfs_create_file("isp1362", S_IRUGO, usb_debug_root, isp1362_hcd,
2168 &isp1362_fops);
2169 }
2170
2171 static void remove_debug_file(struct isp1362_hcd *isp1362_hcd)
2172 {
2173 debugfs_lookup_and_remove("isp1362", usb_debug_root); /* drops the reference taken by the lookup */
2174 }
2175
2176
2177
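     /*
      * Chip reset, buffer memory configuration and controller start/stop
      */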
2178 static void __isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd)
2179 {
2180 int tmp = 20;
2181
2182 isp1362_write_reg16(isp1362_hcd, HCSWRES, HCSWRES_MAGIC);
2183 isp1362_write_reg32(isp1362_hcd, HCCMDSTAT, OHCI_HCR);
2184 while (--tmp) {
2185 mdelay(1);
2186 if (!(isp1362_read_reg32(isp1362_hcd, HCCMDSTAT) & OHCI_HCR))
2187 break;
2188 }
2189 if (!tmp)
2190 pr_err("Software reset timeout\n");
2191 }
2192
2193 static void isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd)
2194 {
2195 unsigned long flags;
2196
2197 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2198 __isp1362_sw_reset(isp1362_hcd);
2199 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2200 }
2201
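     /*
      * Partition the chip's internal buffer memory (ISP1362_BUF_SIZE bytes)
      * into two ISTL (isochronous) buffers, ISP1362_INTL_BUFFERS interrupt
      * PTD blocks and as many ATL (control/bulk) PTD blocks as still fit,
      * then program the corresponding buffer and block size registers.
      */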
2202 static int isp1362_mem_config(struct usb_hcd *hcd)
2203 {
2204 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2205 unsigned long flags;
2206 u32 total;
2207 u16 istl_size = ISP1362_ISTL_BUFSIZE;
2208 u16 intl_blksize = ISP1362_INTL_BLKSIZE + PTD_HEADER_SIZE;
2209 u16 intl_size = ISP1362_INTL_BUFFERS * intl_blksize;
2210 u16 atl_blksize = ISP1362_ATL_BLKSIZE + PTD_HEADER_SIZE;
2211 u16 atl_buffers = (ISP1362_BUF_SIZE - (istl_size + intl_size)) / atl_blksize;
2212 u16 atl_size;
2213 int i;
2214
2215 WARN_ON(istl_size & 3);
2216 WARN_ON(atl_blksize & 3);
2217 WARN_ON(intl_blksize & 3);
2218 WARN_ON(atl_blksize < PTD_HEADER_SIZE);
2219 WARN_ON(intl_blksize < PTD_HEADER_SIZE);
2220
2221 BUG_ON((unsigned)ISP1362_INTL_BUFFERS > 32);
2222 if (atl_buffers > 32)
2223 atl_buffers = 32;
2224 atl_size = atl_buffers * atl_blksize;
2225 total = atl_size + intl_size + istl_size;
2226 dev_info(hcd->self.controller, "ISP1362 Memory usage:\n");
2227 dev_info(hcd->self.controller, " ISTL: 2 * %4d: %4d @ $%04x:$%04x\n",
2228 istl_size / 2, istl_size, 0, istl_size / 2);
2229 dev_info(hcd->self.controller, " INTL: %4d * (%3zu+8): %4d @ $%04x\n",
2230 ISP1362_INTL_BUFFERS, intl_blksize - PTD_HEADER_SIZE,
2231 intl_size, istl_size);
2232 dev_info(hcd->self.controller, " ATL : %4d * (%3zu+8): %4d @ $%04x\n",
2233 atl_buffers, atl_blksize - PTD_HEADER_SIZE,
2234 atl_size, istl_size + intl_size);
2235 dev_info(hcd->self.controller, " USED/FREE: %4d %4d\n", total,
2236 ISP1362_BUF_SIZE - total);
2237
2238 if (total > ISP1362_BUF_SIZE) {
2239 dev_err(hcd->self.controller, "%s: Memory requested: %d, available %d\n",
2240 __func__, total, ISP1362_BUF_SIZE);
2241 return -ENOMEM;
2242 }
2243
2244 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2245
2246 for (i = 0; i < 2; i++) {
2247 isp1362_hcd->istl_queue[i].buf_start = i * istl_size / 2;
2248 isp1362_hcd->istl_queue[i].buf_size = istl_size / 2;
2249 isp1362_hcd->istl_queue[i].blk_size = 4;
2250 INIT_LIST_HEAD(&isp1362_hcd->istl_queue[i].active);
2251 snprintf(isp1362_hcd->istl_queue[i].name,
2252 sizeof(isp1362_hcd->istl_queue[i].name), "ISTL%d", i);
2253 DBG(3, "%s: %5s buf $%04x %d\n", __func__,
2254 isp1362_hcd->istl_queue[i].name,
2255 isp1362_hcd->istl_queue[i].buf_start,
2256 isp1362_hcd->istl_queue[i].buf_size);
2257 }
2258 isp1362_write_reg16(isp1362_hcd, HCISTLBUFSZ, istl_size / 2);
2259
2260 isp1362_hcd->intl_queue.buf_start = istl_size;
2261 isp1362_hcd->intl_queue.buf_size = intl_size;
2262 isp1362_hcd->intl_queue.buf_count = ISP1362_INTL_BUFFERS;
2263 isp1362_hcd->intl_queue.blk_size = intl_blksize;
2264 isp1362_hcd->intl_queue.buf_avail = isp1362_hcd->intl_queue.buf_count;
2265 isp1362_hcd->intl_queue.skip_map = ~0;
2266 INIT_LIST_HEAD(&isp1362_hcd->intl_queue.active);
2267
2268 isp1362_write_reg16(isp1362_hcd, HCINTLBUFSZ,
2269 isp1362_hcd->intl_queue.buf_size);
2270 isp1362_write_reg16(isp1362_hcd, HCINTLBLKSZ,
2271 isp1362_hcd->intl_queue.blk_size - PTD_HEADER_SIZE);
2272 isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0);
2273 isp1362_write_reg32(isp1362_hcd, HCINTLLAST,
2274 1 << (ISP1362_INTL_BUFFERS - 1));
2275
2276 isp1362_hcd->atl_queue.buf_start = istl_size + intl_size;
2277 isp1362_hcd->atl_queue.buf_size = atl_size;
2278 isp1362_hcd->atl_queue.buf_count = atl_buffers;
2279 isp1362_hcd->atl_queue.blk_size = atl_blksize;
2280 isp1362_hcd->atl_queue.buf_avail = isp1362_hcd->atl_queue.buf_count;
2281 isp1362_hcd->atl_queue.skip_map = ~0;
2282 INIT_LIST_HEAD(&isp1362_hcd->atl_queue.active);
2283
2284 isp1362_write_reg16(isp1362_hcd, HCATLBUFSZ,
2285 isp1362_hcd->atl_queue.buf_size);
2286 isp1362_write_reg16(isp1362_hcd, HCATLBLKSZ,
2287 isp1362_hcd->atl_queue.blk_size - PTD_HEADER_SIZE);
2288 isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0);
2289 isp1362_write_reg32(isp1362_hcd, HCATLLAST,
2290 1 << (atl_buffers - 1));
2291
2292 snprintf(isp1362_hcd->atl_queue.name,
2293 sizeof(isp1362_hcd->atl_queue.name), "ATL");
2294 snprintf(isp1362_hcd->intl_queue.name,
2295 sizeof(isp1362_hcd->intl_queue.name), "INTL");
2296 DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__,
2297 isp1362_hcd->intl_queue.name,
2298 isp1362_hcd->intl_queue.buf_start,
2299 ISP1362_INTL_BUFFERS, isp1362_hcd->intl_queue.blk_size,
2300 isp1362_hcd->intl_queue.buf_size);
2301 DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__,
2302 isp1362_hcd->atl_queue.name,
2303 isp1362_hcd->atl_queue.buf_start,
2304 atl_buffers, isp1362_hcd->atl_queue.blk_size,
2305 isp1362_hcd->atl_queue.buf_size);
2306
2307 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2308
2309 return 0;
2310 }
2311
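     /*
      * Reset the controller (via the board's reset hook if available,
      * otherwise by software reset) and wait up to ~100 ms for the
      * clock-ready (CLKRDY) status bit.
      */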
2312 static int isp1362_hc_reset(struct usb_hcd *hcd)
2313 {
2314 int ret = 0;
2315 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2316 unsigned long t;
2317 unsigned long timeout = 100;
2318 unsigned long flags;
2319 int clkrdy = 0;
2320
2321 pr_debug("%s:\n", __func__);
2322
2323 if (isp1362_hcd->board && isp1362_hcd->board->reset) {
2324 isp1362_hcd->board->reset(hcd->self.controller, 1);
2325 msleep(20);
2326 if (isp1362_hcd->board->clock)
2327 isp1362_hcd->board->clock(hcd->self.controller, 1);
2328 isp1362_hcd->board->reset(hcd->self.controller, 0);
2329 } else
2330 isp1362_sw_reset(isp1362_hcd);
2331
2332
2333 t = jiffies + msecs_to_jiffies(timeout);
2334 while (!clkrdy && time_before_eq(jiffies, t)) {
2335 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2336 clkrdy = isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_CLKRDY;
2337 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2338 if (!clkrdy)
2339 msleep(4);
2340 }
2341
2342 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2343 isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_CLKRDY);
2344 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2345 if (!clkrdy) {
2346 pr_err("Clock not ready after %lums\n", timeout);
2347 ret = -ENODEV;
2348 }
2349 return ret;
2350 }
2351
2352 static void isp1362_hc_stop(struct usb_hcd *hcd)
2353 {
2354 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2355 unsigned long flags;
2356 u32 tmp;
2357
2358 pr_debug("%s:\n", __func__);
2359
2360 del_timer_sync(&hcd->rh_timer);
2361
2362 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2363
2364 isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
2365
2366
2367 tmp = isp1362_read_reg32(isp1362_hcd, HCRHDESCA);
2368 tmp &= ~(RH_A_NPS | RH_A_PSM);
2369 isp1362_write_reg32(isp1362_hcd, HCRHDESCA, tmp);
2370 isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS);
2371
2372
2373 if (isp1362_hcd->board && isp1362_hcd->board->reset)
2374 isp1362_hcd->board->reset(hcd->self.controller, 1);
2375 else
2376 __isp1362_sw_reset(isp1362_hcd);
2377
2378 if (isp1362_hcd->board && isp1362_hcd->board->clock)
2379 isp1362_hcd->board->clock(hcd->self.controller, 0);
2380
2381 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2382 }
2383
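     /*
      * Optional read/write sanity test of the chip's internal buffer memory,
      * compiled in only when CHIP_BUFFER_TEST is defined above.
      */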
2384 #ifdef CHIP_BUFFER_TEST
2385 static int isp1362_chip_test(struct isp1362_hcd *isp1362_hcd)
2386 {
2387 int ret = 0;
2388 u16 *ref;
2389 unsigned long flags;
2390
2391 ref = kmalloc(2 * ISP1362_BUF_SIZE, GFP_KERNEL);
2392 if (ref) {
2393 int offset;
2394 u16 *tst = &ref[ISP1362_BUF_SIZE / 2];
2395
2396 for (offset = 0; offset < ISP1362_BUF_SIZE / 2; offset++) {
2397 ref[offset] = ~offset;
2398 tst[offset] = offset;
2399 }
2400
2401 for (offset = 0; offset < 4; offset++) {
2402 int j;
2403
2404 for (j = 0; j < 8; j++) {
2405 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2406 isp1362_write_buffer(isp1362_hcd, (u8 *)ref + offset, 0, j);
2407 isp1362_read_buffer(isp1362_hcd, (u8 *)tst + offset, 0, j);
2408 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2409
2410 if (memcmp((u8 *)ref + offset, (u8 *)tst + offset, j)) {
2411 ret = -ENODEV;
2412 pr_err("%s: memory check with %d byte offset %d failed\n",
2413 __func__, j, offset);
2414 dump_data((u8 *)ref + offset, j);
2415 dump_data((u8 *)tst + offset, j);
2416 }
2417 }
2418 }
2419
2420 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2421 isp1362_write_buffer(isp1362_hcd, ref, 0, ISP1362_BUF_SIZE);
2422 isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
2423 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2424
2425 if (memcmp(ref, tst, ISP1362_BUF_SIZE)) {
2426 ret = -ENODEV;
2427 pr_err("%s: memory check failed\n", __func__);
2428 dump_data((u8 *)tst, ISP1362_BUF_SIZE / 2);
2429 }
2430
2431 for (offset = 0; offset < 256; offset++) {
2432 int test_size = 0;
2433
2434 yield();
2435
2436 memset(tst, 0, ISP1362_BUF_SIZE);
2437 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2438 isp1362_write_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
2439 isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
2440 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2441 if (memcmp(tst, tst + (ISP1362_BUF_SIZE / (2 * sizeof(*tst))),
2442 ISP1362_BUF_SIZE / 2)) {
2443 pr_err("%s: Failed to clear buffer\n", __func__);
2444 dump_data((u8 *)tst, ISP1362_BUF_SIZE);
2445 break;
2446 }
2447 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2448 isp1362_write_buffer(isp1362_hcd, ref, offset * 2, PTD_HEADER_SIZE);
2449 isp1362_write_buffer(isp1362_hcd, ref + PTD_HEADER_SIZE / sizeof(*ref),
2450 offset * 2 + PTD_HEADER_SIZE, test_size);
2451 isp1362_read_buffer(isp1362_hcd, tst, offset * 2,
2452 PTD_HEADER_SIZE + test_size);
2453 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2454 if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) {
2455 dump_data(((u8 *)ref) + offset, PTD_HEADER_SIZE + test_size);
2456 dump_data((u8 *)tst, PTD_HEADER_SIZE + test_size);
2457 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2458 isp1362_read_buffer(isp1362_hcd, tst, offset * 2,
2459 PTD_HEADER_SIZE + test_size);
2460 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2461 if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) {
2462 ret = -ENODEV;
2463 pr_err("%s: memory check with offset %02x failed\n",
2464 __func__, offset);
2465 break;
2466 }
2467 pr_warn("%s: memory check with offset %02x ok after second read\n",
2468 __func__, offset);
2469 }
2470 }
2471 kfree(ref);
2472 }
2473 return ret;
2474 }
2475 #endif
2476
2477 static int isp1362_hc_start(struct usb_hcd *hcd)
2478 {
2479 int ret;
2480 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2481 struct isp1362_platform_data *board = isp1362_hcd->board;
2482 u16 hwcfg;
2483 u16 chipid;
2484 unsigned long flags;
2485
2486 pr_debug("%s:\n", __func__);
2487
2488 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2489 chipid = isp1362_read_reg16(isp1362_hcd, HCCHIPID);
2490 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2491
2492 if ((chipid & HCCHIPID_MASK) != HCCHIPID_MAGIC) {
2493 pr_err("%s: Invalid chip ID %04x\n", __func__, chipid);
2494 return -ENODEV;
2495 }
2496
2497 #ifdef CHIP_BUFFER_TEST
2498 ret = isp1362_chip_test(isp1362_hcd);
2499 if (ret)
2500 return -ENODEV;
2501 #endif
2502 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2503
2504 isp1362_write_reg16(isp1362_hcd, HCuPINT, 0xff);
2505 isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
2506
2507
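     /* Build the hardware configuration word from the platform data */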
2508 hwcfg = HCHWCFG_INT_ENABLE | HCHWCFG_DBWIDTH(1);
2509 if (board->sel15Kres)
2510 hwcfg |= HCHWCFG_PULLDOWN_DS2 |
2511 ((MAX_ROOT_PORTS > 1) ? HCHWCFG_PULLDOWN_DS1 : 0);
2512 if (board->clknotstop)
2513 hwcfg |= HCHWCFG_CLKNOTSTOP;
2514 if (board->oc_enable)
2515 hwcfg |= HCHWCFG_ANALOG_OC;
2516 if (board->int_act_high)
2517 hwcfg |= HCHWCFG_INT_POL;
2518 if (board->int_edge_triggered)
2519 hwcfg |= HCHWCFG_INT_TRIGGER;
2520 if (board->dreq_act_high)
2521 hwcfg |= HCHWCFG_DREQ_POL;
2522 if (board->dack_act_high)
2523 hwcfg |= HCHWCFG_DACK_POL;
2524 isp1362_write_reg16(isp1362_hcd, HCHWCFG, hwcfg);
2525 isp1362_show_reg(isp1362_hcd, HCHWCFG);
2526 isp1362_write_reg16(isp1362_hcd, HCDMACFG, 0);
2527 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2528
2529 ret = isp1362_mem_config(hcd);
2530 if (ret)
2531 return ret;
2532
2533 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2534
2535
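     /* Root hub descriptor setup: power switching mode and power-on-to-good time from platform data */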
2536 isp1362_hcd->rhdesca = 0;
2537 if (board->no_power_switching)
2538 isp1362_hcd->rhdesca |= RH_A_NPS;
2539 if (board->power_switching_mode)
2540 isp1362_hcd->rhdesca |= RH_A_PSM;
2541 if (board->potpg)
2542 isp1362_hcd->rhdesca |= (board->potpg << 24) & RH_A_POTPGT;
2543 else
2544 isp1362_hcd->rhdesca |= (25 << 24) & RH_A_POTPGT;
2545
2546 isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca & ~RH_A_OCPM);
2547 isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca | RH_A_OCPM);
2548 isp1362_hcd->rhdesca = isp1362_read_reg32(isp1362_hcd, HCRHDESCA);
2549
2550 isp1362_hcd->rhdescb = RH_B_PPCM;
2551 isp1362_write_reg32(isp1362_hcd, HCRHDESCB, isp1362_hcd->rhdescb);
2552 isp1362_hcd->rhdescb = isp1362_read_reg32(isp1362_hcd, HCRHDESCB);
2553
2554 isp1362_read_reg32(isp1362_hcd, HCFMINTVL);
2555 isp1362_write_reg32(isp1362_hcd, HCFMINTVL, (FSMP(FI) << 16) | FI);
2556 isp1362_write_reg32(isp1362_hcd, HCLSTHRESH, LSTHRESH);
2557
2558 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2559
2560 isp1362_hcd->hc_control = OHCI_USB_OPER;
2561 hcd->state = HC_STATE_RUNNING;
2562
2563 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2564
2565 isp1362_hcd->intenb = OHCI_INTR_MIE | OHCI_INTR_RHSC | OHCI_INTR_UE;
2566 isp1362_hcd->intenb |= OHCI_INTR_RD;
2567 isp1362_hcd->irqenb = HCuPINT_OPR | HCuPINT_SUSP;
2568 isp1362_write_reg32(isp1362_hcd, HCINTENB, isp1362_hcd->intenb);
2569 isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
2570
2571
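     /* Go operational: write the OPERATIONAL functional state to HCCONTROL */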
2572 isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
2573
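     /* Set global port power (LPSC) and enable remote wakeup as a resume event (DRWE) */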
2574 isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC | RH_HS_DRWE);
2575
2576 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2577
2578 return 0;
2579 }
2580
2581
2582
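     /*
      * HC driver operations handed to usbcore via usb_create_hcd()/usb_add_hcd()
      */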
2583 static const struct hc_driver isp1362_hc_driver = {
2584 .description = hcd_name,
2585 .product_desc = "ISP1362 Host Controller",
2586 .hcd_priv_size = sizeof(struct isp1362_hcd),
2587
2588 .irq = isp1362_irq,
2589 .flags = HCD_USB11 | HCD_MEMORY,
2590
2591 .reset = isp1362_hc_reset,
2592 .start = isp1362_hc_start,
2593 .stop = isp1362_hc_stop,
2594
2595 .urb_enqueue = isp1362_urb_enqueue,
2596 .urb_dequeue = isp1362_urb_dequeue,
2597 .endpoint_disable = isp1362_endpoint_disable,
2598
2599 .get_frame_number = isp1362_get_frame,
2600
2601 .hub_status_data = isp1362_hub_status_data,
2602 .hub_control = isp1362_hub_control,
2603 .bus_suspend = isp1362_bus_suspend,
2604 .bus_resume = isp1362_bus_resume,
2605 };
2606
2607
2608
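     /*
      * Platform bus glue: probe/remove and optional suspend/resume callbacks
      */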
2609 static int isp1362_remove(struct platform_device *pdev)
2610 {
2611 struct usb_hcd *hcd = platform_get_drvdata(pdev);
2612 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2613
2614 remove_debug_file(isp1362_hcd);
2615 DBG(0, "%s: Removing HCD\n", __func__);
2616 usb_remove_hcd(hcd);
2617 DBG(0, "%s: put_hcd\n", __func__);
2618 usb_put_hcd(hcd);
2619 DBG(0, "%s: Done\n", __func__);
2620
2621 return 0;
2622 }
2623
2624 static int isp1362_probe(struct platform_device *pdev)
2625 {
2626 struct usb_hcd *hcd;
2627 struct isp1362_hcd *isp1362_hcd;
2628 struct resource *data, *irq_res;
2629 void __iomem *addr_reg;
2630 void __iomem *data_reg;
2631 int irq;
2632 int retval = 0;
2633 unsigned int irq_flags = 0;
2634
2635 if (usb_disabled())
2636 return -ENODEV;
2637
2638
2639
2640
2641
2642
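     /*
      * The platform device must provide at least three resources: the data
      * register window (MEM 0), the address register window (MEM 1) and the
      * interrupt line.
      */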
2643 if (pdev->num_resources < 3)
2644 return -ENODEV;
2645
2646 irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2647 if (!irq_res)
2648 return -ENODEV;
2649
2650 irq = irq_res->start;
2651
2652 addr_reg = devm_platform_ioremap_resource(pdev, 1);
2653 if (IS_ERR(addr_reg))
2654 return PTR_ERR(addr_reg);
2655
2656 data = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2657 data_reg = devm_ioremap_resource(&pdev->dev, data);
2658 if (IS_ERR(data_reg))
2659 return PTR_ERR(data_reg);
2660
2661
2662 hcd = usb_create_hcd(&isp1362_hc_driver, &pdev->dev, dev_name(&pdev->dev));
2663 if (!hcd)
2664 return -ENOMEM;
2665
2666 hcd->rsrc_start = data->start;
2667 isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2668 isp1362_hcd->data_reg = data_reg;
2669 isp1362_hcd->addr_reg = addr_reg;
2670
2671 isp1362_hcd->next_statechange = jiffies;
2672 spin_lock_init(&isp1362_hcd->lock);
2673 INIT_LIST_HEAD(&isp1362_hcd->async);
2674 INIT_LIST_HEAD(&isp1362_hcd->periodic);
2675 INIT_LIST_HEAD(&isp1362_hcd->isoc);
2676 INIT_LIST_HEAD(&isp1362_hcd->remove_list);
2677 isp1362_hcd->board = dev_get_platdata(&pdev->dev);
2678 #if USE_PLATFORM_DELAY
2679 if (!isp1362_hcd->board->delay) {
2680 dev_err(hcd->self.controller, "No platform delay function given\n");
2681 retval = -ENODEV;
2682 goto err;
2683 }
2684 #endif
2685
2686 if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE)
2687 irq_flags |= IRQF_TRIGGER_RISING;
2688 if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE)
2689 irq_flags |= IRQF_TRIGGER_FALLING;
2690 if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL)
2691 irq_flags |= IRQF_TRIGGER_HIGH;
2692 if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL)
2693 irq_flags |= IRQF_TRIGGER_LOW;
2694
2695 retval = usb_add_hcd(hcd, irq, irq_flags | IRQF_SHARED);
2696 if (retval != 0)
2697 goto err;
2698 device_wakeup_enable(hcd->self.controller);
2699
2700 dev_info(&pdev->dev, "%s, irq %d\n", hcd->product_desc, irq);
2701
2702 create_debug_file(isp1362_hcd);
2703
2704 return 0;
2705
2706 err:
2707 usb_put_hcd(hcd);
2708
2709 return retval;
2710 }
2711
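     /*
      * Illustrative only: a minimal board-file sketch of how a platform
      * could register this controller.  The resource order (MEM 0 = data
      * register window, MEM 1 = address register window, plus one IRQ) and
      * the isp1362_platform_data fields mirror the usage in this file; the
      * addresses and the IRQ number are made-up placeholders.
      *
      *   static struct resource isp1362_resources[] = {
      *           DEFINE_RES_MEM(0x08000000, 2),   // data register window
      *           DEFINE_RES_MEM(0x08000002, 2),   // address register window
      *           DEFINE_RES_IRQ(42),
      *   };
      *
      *   static struct isp1362_platform_data isp1362_priv = {
      *           .sel15Kres = 1,
      *           .oc_enable = 1,
      *           .potpg     = 25,
      *   };
      *
      *   static struct platform_device isp1362_device = {
      *           .name          = "isp1362-hcd",
      *           .id            = 0,
      *           .dev           = { .platform_data = &isp1362_priv, },
      *           .num_resources = ARRAY_SIZE(isp1362_resources),
      *           .resource      = isp1362_resources,
      *   };
      */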
2712 #ifdef CONFIG_PM
2713 static int isp1362_suspend(struct platform_device *pdev, pm_message_t state)
2714 {
2715 struct usb_hcd *hcd = platform_get_drvdata(pdev);
2716 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2717 unsigned long flags;
2718 int retval = 0;
2719
2720 DBG(0, "%s: Suspending device\n", __func__);
2721
2722 if (state.event == PM_EVENT_FREEZE) {
2723 DBG(0, "%s: Suspending root hub\n", __func__);
2724 retval = isp1362_bus_suspend(hcd);
2725 } else {
2726 DBG(0, "%s: Suspending RH ports\n", __func__);
2727 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2728 isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS);
2729 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2730 }
2731 if (retval == 0)
2732 pdev->dev.power.power_state = state;
2733 return retval;
2734 }
2735
2736 static int isp1362_resume(struct platform_device *pdev)
2737 {
2738 struct usb_hcd *hcd = platform_get_drvdata(pdev);
2739 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2740 unsigned long flags;
2741
2742 DBG(0, "%s: Resuming\n", __func__);
2743
2744 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2745 DBG(0, "%s: Resume RH ports\n", __func__);
2746 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2747 isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC);
2748 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2749 return 0;
2750 }
2751
2752 pdev->dev.power.power_state = PMSG_ON;
2753
2754 return isp1362_bus_resume(isp1362_hcd_to_hcd(isp1362_hcd));
2755 }
2756 #else
2757 #define isp1362_suspend NULL
2758 #define isp1362_resume NULL
2759 #endif
2760
2761 static struct platform_driver isp1362_driver = {
2762 .probe = isp1362_probe,
2763 .remove = isp1362_remove,
2764
2765 .suspend = isp1362_suspend,
2766 .resume = isp1362_resume,
2767 .driver = {
2768 .name = hcd_name,
2769 },
2770 };
2771
2772 module_platform_driver(isp1362_driver);