// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2003-2008 Takahiro Hirofuchi
 */

#include <linux/kthread.h>
#include <linux/socket.h>
#include <linux/scatterlist.h>

#include "usbip_common.h"
#include "stub.h"

/* must be called with sdev->priv_lock held (spin_lock_irqsave(&sdev->priv_lock, flags)) */
void stub_enqueue_ret_unlink(struct stub_device *sdev, __u32 seqnum,
                 __u32 status)
{
    struct stub_unlink *unlink;

    unlink = kzalloc(sizeof(struct stub_unlink), GFP_ATOMIC);
    if (!unlink) {
        usbip_event_add(&sdev->ud, VDEV_EVENT_ERROR_MALLOC);
        return;
    }

    unlink->seqnum = seqnum;
    unlink->status = status;

    list_add_tail(&unlink->list, &sdev->unlink_tx);
}

/**
 * stub_complete - completion handler of a usbip urb
 * @urb: pointer to the completed urb
 *
 * When a urb completes, the USB core calls this function, usually in
 * interrupt context. To return the result of the urb to the client, the
 * completed urb is linked onto the list of URBs pending transmission.
 */
void stub_complete(struct urb *urb)
{
    struct stub_priv *priv = (struct stub_priv *) urb->context;
    struct stub_device *sdev = priv->sdev;
    unsigned long flags;

    usbip_dbg_stub_tx("complete! status %d\n", urb->status);

    switch (urb->status) {
    case 0:
        /* OK */
        break;
    case -ENOENT:
        dev_info(&urb->dev->dev,
             "stopped by a call to usb_kill_urb() because of cleaning up a virtual connection\n");
        return;
    case -ECONNRESET:
        dev_info(&urb->dev->dev,
             "unlinked by a call to usb_unlink_urb()\n");
        break;
    case -EPIPE:
        dev_info(&urb->dev->dev, "endpoint %d is stalled\n",
             usb_pipeendpoint(urb->pipe));
        break;
    case -ESHUTDOWN:
        dev_info(&urb->dev->dev, "device removed?\n");
        break;
    default:
        dev_info(&urb->dev->dev,
             "urb completion with non-zero status %d\n",
             urb->status);
        break;
    }

    /*
     * If the server broke a single SG request into several URBs, the URBs
     * must be reassembled before the result is sent back to the vhci.
     * Don't wake up the tx thread until all of the URBs have completed.
     */
    if (priv->sgl) {
        priv->completed_urbs++;

        /* Only save the first error status */
        if (urb->status && !priv->urb_status)
            priv->urb_status = urb->status;

        if (priv->completed_urbs < priv->num_urbs)
            return;
    }

    /* link the urb onto the tx queue. */
    spin_lock_irqsave(&sdev->priv_lock, flags);
    if (sdev->ud.tcp_socket == NULL) {
        usbip_dbg_stub_tx("ignore urb for closed connection\n");
        /* It will be freed in stub_device_cleanup_urbs(). */
    } else if (priv->unlinking) {
        stub_enqueue_ret_unlink(sdev, priv->seqnum, urb->status);
        stub_free_priv_and_urb(priv);
    } else {
        list_move_tail(&priv->list, &sdev->priv_tx);
    }
    spin_unlock_irqrestore(&sdev->priv_lock, flags);

    /* wake up tx_thread */
    wake_up(&sdev->tx_waitq);
}

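/*
 * Fill in the header fields common to every reply PDU; devid, ep and
 * direction are not used in RET_SUBMIT/RET_UNLINK replies and are cleared.
 */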
static inline void setup_base_pdu(struct usbip_header_basic *base,
                  __u32 command, __u32 seqnum)
{
    base->command   = command;
    base->seqnum    = seqnum;
    base->devid = 0;
    base->ep    = 0;
    base->direction = 0;
}

static void setup_ret_submit_pdu(struct usbip_header *rpdu, struct urb *urb)
{
    struct stub_priv *priv = (struct stub_priv *) urb->context;

    setup_base_pdu(&rpdu->base, USBIP_RET_SUBMIT, priv->seqnum);
    usbip_pack_pdu(rpdu, urb, USBIP_RET_SUBMIT, 1);
}

static void setup_ret_unlink_pdu(struct usbip_header *rpdu,
                 struct stub_unlink *unlink)
{
    setup_base_pdu(&rpdu->base, USBIP_RET_UNLINK, unlink->seqnum);
    rpdu->u.ret_unlink.status = unlink->status;
}

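/*
 * Pop the oldest completed URB from priv_tx and park it on priv_free so it
 * can be released after transmission; returns NULL when the queue is empty.
 */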
static struct stub_priv *dequeue_from_priv_tx(struct stub_device *sdev)
{
    unsigned long flags;
    struct stub_priv *priv, *tmp;

    spin_lock_irqsave(&sdev->priv_lock, flags);

    list_for_each_entry_safe(priv, tmp, &sdev->priv_tx, list) {
        list_move_tail(&priv->list, &sdev->priv_free);
        spin_unlock_irqrestore(&sdev->priv_lock, flags);
        return priv;
    }

    spin_unlock_irqrestore(&sdev->priv_lock, flags);

    return NULL;
}

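/*
 * Send a USBIP_RET_SUBMIT reply for every completed URB queued on priv_tx:
 * the usbip header, the IN transfer data (if any) and, for isochronous
 * transfers, the packet descriptors. Returns the total number of bytes
 * handed to the socket, or -1 on error.
 */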
static int stub_send_ret_submit(struct stub_device *sdev)
{
    unsigned long flags;
    struct stub_priv *priv, *tmp;

    struct msghdr msg;
    size_t txsize;

    size_t total_size = 0;

    while ((priv = dequeue_from_priv_tx(sdev)) != NULL) {
        struct urb *urb = priv->urbs[0];
        struct usbip_header pdu_header;
        struct usbip_iso_packet_descriptor *iso_buffer = NULL;
        struct kvec *iov = NULL;
        struct scatterlist *sg;
        u32 actual_length = 0;
        int iovnum = 0;
        int ret;
        int i;

        txsize = 0;
        memset(&pdu_header, 0, sizeof(pdu_header));
        memset(&msg, 0, sizeof(msg));

        if (urb->actual_length > 0 && !urb->transfer_buffer &&
           !urb->num_sgs) {
            dev_err(&sdev->udev->dev,
                "urb: actual_length %d transfer_buffer null\n",
                urb->actual_length);
            return -1;
        }

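        /*
         * Reserve enough kvec entries for the worst case: one for the
         * usbip header, one per data segment (ISO packet, SG entry or
         * split URB) and, for isochronous transfers, one for the ISO
         * packet descriptor block.
         */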
        if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
            iovnum = 2 + urb->number_of_packets;
        else if (usb_pipein(urb->pipe) && urb->actual_length > 0 &&
            urb->num_sgs)
            iovnum = 1 + urb->num_sgs;
        else if (usb_pipein(urb->pipe) && priv->sgl)
            iovnum = 1 + priv->num_urbs;
        else
            iovnum = 2;

        iov = kcalloc(iovnum, sizeof(struct kvec), GFP_KERNEL);

        if (!iov) {
            usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_MALLOC);
            return -1;
        }

        iovnum = 0;

        /* 1. setup usbip_header */
        setup_ret_submit_pdu(&pdu_header, urb);
        usbip_dbg_stub_tx("setup txdata seqnum: %d\n",
                  pdu_header.base.seqnum);

        if (priv->sgl) {
            for (i = 0; i < priv->num_urbs; i++)
                actual_length += priv->urbs[i]->actual_length;

            pdu_header.u.ret_submit.status = priv->urb_status;
            pdu_header.u.ret_submit.actual_length = actual_length;
        }

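        /* convert the header to network byte order before it is sent */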
        usbip_header_correct_endian(&pdu_header, 1);

        iov[iovnum].iov_base = &pdu_header;
        iov[iovnum].iov_len  = sizeof(pdu_header);
        iovnum++;
        txsize += sizeof(pdu_header);

        /* 2. setup transfer buffer */
        if (usb_pipein(urb->pipe) && priv->sgl) {
            /* If the server split a single SG request into several
             * URBs because the server's HCD doesn't support SG,
             * reassemble the split URB buffers into a single
             * return command.
             */
            for (i = 0; i < priv->num_urbs; i++) {
                iov[iovnum].iov_base =
                    priv->urbs[i]->transfer_buffer;
                iov[iovnum].iov_len =
                    priv->urbs[i]->actual_length;
                iovnum++;
            }
            txsize += actual_length;
        } else if (usb_pipein(urb->pipe) &&
            usb_pipetype(urb->pipe) != PIPE_ISOCHRONOUS &&
            urb->actual_length > 0) {
            if (urb->num_sgs) {
                unsigned int copy = urb->actual_length;
                int size;

                for_each_sg(urb->sg, sg, urb->num_sgs, i) {
                    if (copy == 0)
                        break;

                    if (copy < sg->length)
                        size = copy;
                    else
                        size = sg->length;

                    iov[iovnum].iov_base = sg_virt(sg);
                    iov[iovnum].iov_len = size;

                    iovnum++;
                    copy -= size;
                }
            } else {
                iov[iovnum].iov_base = urb->transfer_buffer;
                iov[iovnum].iov_len  = urb->actual_length;
                iovnum++;
            }
            txsize += urb->actual_length;
        } else if (usb_pipein(urb->pipe) &&
               usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
            /*
             * For isochronous transfers: the actual length is the sum
             * of the actual lengths of the individual packets, but as
             * the packet offsets are not changed there will be
             * padding between the packets. To use the bandwidth
             * optimally, the padding is not transmitted.
             */

            int i;

            for (i = 0; i < urb->number_of_packets; i++) {
                iov[iovnum].iov_base = urb->transfer_buffer +
                    urb->iso_frame_desc[i].offset;
                iov[iovnum].iov_len =
                    urb->iso_frame_desc[i].actual_length;
                iovnum++;
                txsize += urb->iso_frame_desc[i].actual_length;
            }

            if (txsize != sizeof(pdu_header) + urb->actual_length) {
                dev_err(&sdev->udev->dev,
                    "actual length of urb %d does not match iso packet sizes %zu\n",
                    urb->actual_length,
                    txsize-sizeof(pdu_header));
                kfree(iov);
                usbip_event_add(&sdev->ud,
                        SDEV_EVENT_ERROR_TCP);
                return -1;
            }
        }

        /* 3. setup iso_packet_descriptor */
        if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
            ssize_t len = 0;

            iso_buffer = usbip_alloc_iso_desc_pdu(urb, &len);
            if (!iso_buffer) {
                usbip_event_add(&sdev->ud,
                        SDEV_EVENT_ERROR_MALLOC);
                kfree(iov);
                return -1;
            }

            iov[iovnum].iov_base = iso_buffer;
            iov[iovnum].iov_len  = len;
            txsize += len;
            iovnum++;
        }

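        /* hand header, data and ISO descriptors to the socket in one call */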
        ret = kernel_sendmsg(sdev->ud.tcp_socket, &msg,
                        iov,  iovnum, txsize);
        if (ret != txsize) {
            dev_err(&sdev->udev->dev,
                "sendmsg failed!, retval %d for %zd\n",
                ret, txsize);
            kfree(iov);
            kfree(iso_buffer);
            usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_TCP);
            return -1;
        }

        kfree(iov);
        kfree(iso_buffer);

        total_size += txsize;
    }

    spin_lock_irqsave(&sdev->priv_lock, flags);
    list_for_each_entry_safe(priv, tmp, &sdev->priv_free, list) {
        stub_free_priv_and_urb(priv);
    }
    spin_unlock_irqrestore(&sdev->priv_lock, flags);

    return total_size;
}

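/*
 * Pop the oldest pending unlink reply from unlink_tx and move it to
 * unlink_free for later release; returns NULL when the queue is empty.
 */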
static struct stub_unlink *dequeue_from_unlink_tx(struct stub_device *sdev)
{
    unsigned long flags;
    struct stub_unlink *unlink, *tmp;

    spin_lock_irqsave(&sdev->priv_lock, flags);

    list_for_each_entry_safe(unlink, tmp, &sdev->unlink_tx, list) {
        list_move_tail(&unlink->list, &sdev->unlink_free);
        spin_unlock_irqrestore(&sdev->priv_lock, flags);
        return unlink;
    }

    spin_unlock_irqrestore(&sdev->priv_lock, flags);

    return NULL;
}

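/*
 * Send a USBIP_RET_UNLINK reply (header only) for every queued unlink
 * request, then free the entries parked on unlink_free. Returns the total
 * number of bytes sent, or -1 on error.
 */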
static int stub_send_ret_unlink(struct stub_device *sdev)
{
    unsigned long flags;
    struct stub_unlink *unlink, *tmp;

    struct msghdr msg;
    struct kvec iov[1];
    size_t txsize;

    size_t total_size = 0;

    while ((unlink = dequeue_from_unlink_tx(sdev)) != NULL) {
        int ret;
        struct usbip_header pdu_header;

        txsize = 0;
        memset(&pdu_header, 0, sizeof(pdu_header));
        memset(&msg, 0, sizeof(msg));
        memset(&iov, 0, sizeof(iov));

        usbip_dbg_stub_tx("setup ret unlink %lu\n", unlink->seqnum);

        /* 1. setup usbip_header */
        setup_ret_unlink_pdu(&pdu_header, unlink);
        usbip_header_correct_endian(&pdu_header, 1);

        iov[0].iov_base = &pdu_header;
        iov[0].iov_len  = sizeof(pdu_header);
        txsize += sizeof(pdu_header);

        ret = kernel_sendmsg(sdev->ud.tcp_socket, &msg, iov,
                     1, txsize);
        if (ret != txsize) {
            dev_err(&sdev->udev->dev,
                "sendmsg failed!, retval %d for %zd\n",
                ret, txsize);
            usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_TCP);
            return -1;
        }

        usbip_dbg_stub_tx("send txdata\n");
        total_size += txsize;
    }

    spin_lock_irqsave(&sdev->priv_lock, flags);

    list_for_each_entry_safe(unlink, tmp, &sdev->unlink_free, list) {
        list_del(&unlink->list);
        kfree(unlink);
    }

    spin_unlock_irqrestore(&sdev->priv_lock, flags);

    return total_size;
}

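/*
 * TX thread: flush completed URBs and pending unlink replies to the client,
 * then sleep on tx_waitq until there is more work or the thread is stopped.
 */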
int stub_tx_loop(void *data)
{
    struct usbip_device *ud = data;
    struct stub_device *sdev = container_of(ud, struct stub_device, ud);

    while (!kthread_should_stop()) {
        if (usbip_event_happened(ud))
            break;

        /*
         * send_ret_submit runs before send_ret_unlink. stub_rx looks
         * only at the priv_init queue, so if a URB completes before
         * CMD_UNLINK is received, the priv has already moved to the
         * priv_tx queue and stub_rx does not find the target priv. In
         * that case, vhci_rx receives the result of the submit request
         * and then the result of the unlink request. The result of the
         * submit is given back to the usbcore as the completion of the
         * unlink request, and the unlink request itself is ignored.
         * This is ok because a driver that calls usb_unlink_urb() can
         * tell the unlink was too late from the status of the returned
         * URB, which carries the result of usb_submit_urb().
         */
        if (stub_send_ret_submit(sdev) < 0)
            break;

        if (stub_send_ret_unlink(sdev) < 0)
            break;

        wait_event_interruptible(sdev->tx_waitq,
                     (!list_empty(&sdev->priv_tx) ||
                      !list_empty(&sdev->unlink_tx) ||
                      kthread_should_stop()));
    }

    return 0;
}