0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Greybus operations
0004  *
0005  * Copyright 2014-2015 Google Inc.
0006  * Copyright 2014-2015 Linaro Ltd.
0007  */
0008 
0009 #include <linux/kernel.h>
0010 #include <linux/slab.h>
0011 #include <linux/module.h>
0012 #include <linux/sched.h>
0013 #include <linux/wait.h>
0014 #include <linux/workqueue.h>
0015 #include <linux/greybus.h>
0016 
0017 #include "greybus_trace.h"
0018 
0019 static struct kmem_cache *gb_operation_cache;
0020 static struct kmem_cache *gb_message_cache;
0021 
0022 /* Workqueue to handle Greybus operation completions. */
0023 static struct workqueue_struct *gb_operation_completion_wq;
0024 
0025 /* Wait queue for synchronous cancellations. */
0026 static DECLARE_WAIT_QUEUE_HEAD(gb_operation_cancellation_queue);
0027 
0028 /*
0029  * Protects updates to operation->errno.
0030  */
0031 static DEFINE_SPINLOCK(gb_operations_lock);
0032 
0033 static int gb_operation_response_send(struct gb_operation *operation,
0034                       int errno);
0035 
0036 /*
0037  * Increment operation active count and add to connection list unless the
0038  * connection is going away.
0039  *
0040  * Caller holds operation reference.
0041  */
0042 static int gb_operation_get_active(struct gb_operation *operation)
0043 {
0044     struct gb_connection *connection = operation->connection;
0045     unsigned long flags;
0046 
0047     spin_lock_irqsave(&connection->lock, flags);
0048     switch (connection->state) {
0049     case GB_CONNECTION_STATE_ENABLED:
0050         break;
0051     case GB_CONNECTION_STATE_ENABLED_TX:
0052         if (gb_operation_is_incoming(operation))
0053             goto err_unlock;
0054         break;
0055     case GB_CONNECTION_STATE_DISCONNECTING:
0056         if (!gb_operation_is_core(operation))
0057             goto err_unlock;
0058         break;
0059     default:
0060         goto err_unlock;
0061     }
0062 
0063     if (operation->active++ == 0)
0064         list_add_tail(&operation->links, &connection->operations);
0065 
0066     trace_gb_operation_get_active(operation);
0067 
0068     spin_unlock_irqrestore(&connection->lock, flags);
0069 
0070     return 0;
0071 
0072 err_unlock:
0073     spin_unlock_irqrestore(&connection->lock, flags);
0074 
0075     return -ENOTCONN;
0076 }
0077 
0078 /* Caller holds operation reference. */
0079 static void gb_operation_put_active(struct gb_operation *operation)
0080 {
0081     struct gb_connection *connection = operation->connection;
0082     unsigned long flags;
0083 
0084     spin_lock_irqsave(&connection->lock, flags);
0085 
0086     trace_gb_operation_put_active(operation);
0087 
0088     if (--operation->active == 0) {
0089         list_del(&operation->links);
0090         if (atomic_read(&operation->waiters))
0091             wake_up(&gb_operation_cancellation_queue);
0092     }
0093     spin_unlock_irqrestore(&connection->lock, flags);
0094 }
0095 
0096 static bool gb_operation_is_active(struct gb_operation *operation)
0097 {
0098     struct gb_connection *connection = operation->connection;
0099     unsigned long flags;
0100     bool ret;
0101 
0102     spin_lock_irqsave(&connection->lock, flags);
0103     ret = operation->active;
0104     spin_unlock_irqrestore(&connection->lock, flags);
0105 
0106     return ret;
0107 }
0108 
0109 /*
0110  * Set an operation's result.
0111  *
0112  * Initially an outgoing operation's errno value is -EBADR.
0113  * If no error occurs before sending the request message, the only
0114  * valid value operation->errno can be set to is -EINPROGRESS,
0115  * indicating the request has been (or rather is about to be) sent.
0116  * At that point nobody should be looking at the result until the
0117  * response arrives.
0118  *
0119  * The first time the result gets set after the request has been
0120  * sent, that result "sticks."  That is, if two concurrent threads
0121  * race to set the result, the first one wins.  The return value
0122  * tells the caller whether its result was recorded; if not the
0123  * caller has nothing more to do.
0124  *
0125  * The result value -EILSEQ is reserved to signal an implementation
0126  * error; if it's ever observed, the code performing the request has
0127  * done something fundamentally wrong.  It is an error to try to set
0128  * the result to -EBADR, and attempts to do so result in a warning,
0129  * and -EILSEQ is used instead.  Similarly, the only valid result
0130  * value to set for an operation in initial state is -EINPROGRESS.
0131  * Attempts to do otherwise will also record a (successful) -EILSEQ
0132  * operation result.
0133  */
0134 static bool gb_operation_result_set(struct gb_operation *operation, int result)
0135 {
0136     unsigned long flags;
0137     int prev;
0138 
0139     if (result == -EINPROGRESS) {
0140         /*
0141          * -EINPROGRESS is used to indicate the request is
0142          * in flight.  It should be the first result value
0143          * set after the initial -EBADR.  Issue a warning
0144          * and record an implementation error if it's
0145          * set at any other time.
0146          */
0147         spin_lock_irqsave(&gb_operations_lock, flags);
0148         prev = operation->errno;
0149         if (prev == -EBADR)
0150             operation->errno = result;
0151         else
0152             operation->errno = -EILSEQ;
0153         spin_unlock_irqrestore(&gb_operations_lock, flags);
0154         WARN_ON(prev != -EBADR);
0155 
0156         return true;
0157     }
0158 
0159     /*
0160      * The first result value set after a request has been sent
0161      * will be the final result of the operation.  Subsequent
0162      * attempts to set the result are ignored.
0163      *
0164      * Note that -EBADR is a reserved "initial state" result
0165      * value.  Attempts to set this value result in a warning,
0166      * and the result code is set to -EILSEQ instead.
0167      */
0168     if (WARN_ON(result == -EBADR))
0169         result = -EILSEQ; /* Nobody should be setting -EBADR */
0170 
0171     spin_lock_irqsave(&gb_operations_lock, flags);
0172     prev = operation->errno;
0173     if (prev == -EINPROGRESS)
0174         operation->errno = result;  /* First and final result */
0175     spin_unlock_irqrestore(&gb_operations_lock, flags);
0176 
0177     return prev == -EINPROGRESS;
0178 }
0179 
0180 int gb_operation_result(struct gb_operation *operation)
0181 {
0182     int result = operation->errno;
0183 
0184     WARN_ON(result == -EBADR);
0185     WARN_ON(result == -EINPROGRESS);
0186 
0187     return result;
0188 }
0189 EXPORT_SYMBOL_GPL(gb_operation_result);
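
/*
 * A minimal sketch (illustration only, not driver code) of the errno
 * lifecycle described above for an outgoing operation:
 *
 *	creation:                        operation->errno == -EBADR
 *	gb_operation_request_send():     result set to -EINPROGRESS
 *	response / send error / timeout /
 *	cancellation:                    first gb_operation_result_set()
 *	                                 call wins and the result "sticks"
 *	completion callback:             gb_operation_result() returns it
 *
 * Any later attempt to set a result is ignored; trying to set -EBADR, or
 * -EINPROGRESS out of order, is reported as an implementation error and
 * recorded as -EILSEQ.
 */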
0190 
0191 /*
0192  * Looks up an outgoing operation on a connection and returns a refcounted
0193  * pointer if found, or NULL otherwise.
0194  */
0195 static struct gb_operation *
0196 gb_operation_find_outgoing(struct gb_connection *connection, u16 operation_id)
0197 {
0198     struct gb_operation *operation;
0199     unsigned long flags;
0200     bool found = false;
0201 
0202     spin_lock_irqsave(&connection->lock, flags);
0203     list_for_each_entry(operation, &connection->operations, links)
0204         if (operation->id == operation_id &&
0205             !gb_operation_is_incoming(operation)) {
0206             gb_operation_get(operation);
0207             found = true;
0208             break;
0209         }
0210     spin_unlock_irqrestore(&connection->lock, flags);
0211 
0212     return found ? operation : NULL;
0213 }
0214 
0215 static int gb_message_send(struct gb_message *message, gfp_t gfp)
0216 {
0217     struct gb_connection *connection = message->operation->connection;
0218 
0219     trace_gb_message_send(message);
0220     return connection->hd->driver->message_send(connection->hd,
0221                     connection->hd_cport_id,
0222                     message,
0223                     gfp);
0224 }
0225 
0226 /*
0227  * Cancel a message we have passed to the host device layer to be sent.
0228  */
0229 static void gb_message_cancel(struct gb_message *message)
0230 {
0231     struct gb_host_device *hd = message->operation->connection->hd;
0232 
0233     hd->driver->message_cancel(message);
0234 }
0235 
0236 static void gb_operation_request_handle(struct gb_operation *operation)
0237 {
0238     struct gb_connection *connection = operation->connection;
0239     int status;
0240     int ret;
0241 
0242     if (connection->handler) {
0243         status = connection->handler(operation);
0244     } else {
0245         dev_err(&connection->hd->dev,
0246             "%s: unexpected incoming request of type 0x%02x\n",
0247             connection->name, operation->type);
0248 
0249         status = -EPROTONOSUPPORT;
0250     }
0251 
0252     ret = gb_operation_response_send(operation, status);
0253     if (ret) {
0254         dev_err(&connection->hd->dev,
0255             "%s: failed to send response %d for type 0x%02x: %d\n",
0256             connection->name, status, operation->type, ret);
0257         return;
0258     }
0259 }
0260 
0261 /*
0262  * Process operation work.
0263  *
0264  * For incoming requests, call the protocol request handler. The operation
0265  * result should be -EINPROGRESS at this point.
0266  *
0267  * For outgoing requests, the operation result value should have
0268  * been set before queueing this.  The operation callback function
0269  * allows the original requester to know the request has completed
0270  * and its result is available.
0271  */
0272 static void gb_operation_work(struct work_struct *work)
0273 {
0274     struct gb_operation *operation;
0275     int ret;
0276 
0277     operation = container_of(work, struct gb_operation, work);
0278 
0279     if (gb_operation_is_incoming(operation)) {
0280         gb_operation_request_handle(operation);
0281     } else {
0282         ret = del_timer_sync(&operation->timer);
0283         if (!ret) {
0284             /* Cancel request message if scheduled by timeout. */
0285             if (gb_operation_result(operation) == -ETIMEDOUT)
0286                 gb_message_cancel(operation->request);
0287         }
0288 
0289         operation->callback(operation);
0290     }
0291 
0292     gb_operation_put_active(operation);
0293     gb_operation_put(operation);
0294 }
0295 
0296 static void gb_operation_timeout(struct timer_list *t)
0297 {
0298     struct gb_operation *operation = from_timer(operation, t, timer);
0299 
0300     if (gb_operation_result_set(operation, -ETIMEDOUT)) {
0301         /*
0302          * A stuck request message will be cancelled from the
0303          * workqueue.
0304          */
0305         queue_work(gb_operation_completion_wq, &operation->work);
0306     }
0307 }
0308 
0309 static void gb_operation_message_init(struct gb_host_device *hd,
0310                       struct gb_message *message,
0311                       u16 operation_id,
0312                       size_t payload_size, u8 type)
0313 {
0314     struct gb_operation_msg_hdr *header;
0315 
0316     header = message->buffer;
0317 
0318     message->header = header;
0319     message->payload = payload_size ? header + 1 : NULL;
0320     message->payload_size = payload_size;
0321 
0322     /*
0323      * The type supplied for incoming message buffers will be
0324      * GB_REQUEST_TYPE_INVALID. Such buffers will be overwritten by
0325      * arriving data so there's no need to initialize the message header.
0326      */
0327     if (type != GB_REQUEST_TYPE_INVALID) {
0328         u16 message_size = (u16)(sizeof(*header) + payload_size);
0329 
0330         /*
0331          * For a request, the operation id gets filled in
0332          * when the message is sent.  For a response, it
0333          * will be copied from the request by the caller.
0334          *
0335          * The result field in a request message must be
0336          * zero.  It will be set just prior to sending for
0337          * a response.
0338          */
0339         header->size = cpu_to_le16(message_size);
0340         header->operation_id = 0;
0341         header->type = type;
0342         header->result = 0;
0343     }
0344 }
0345 
0346 /*
0347  * Allocate a message to be used for an operation request or response.
0348  * Both types of message contain a common header.  The request message
0349  * for an outgoing operation is outbound, as is the response message
0350  * for an incoming operation.  The message header for an outbound
0351  * message is partially initialized here.
0352  *
0353  * The headers for inbound messages don't need to be initialized;
0354  * they'll be filled in by arriving data.
0355  *
0356  * Our message buffers have the following layout:
0357  *  message header  \_ these combined are
0358  *  message payload /  the message size
0359  */
0360 static struct gb_message *
0361 gb_operation_message_alloc(struct gb_host_device *hd, u8 type,
0362                size_t payload_size, gfp_t gfp_flags)
0363 {
0364     struct gb_message *message;
0365     struct gb_operation_msg_hdr *header;
0366     size_t message_size = payload_size + sizeof(*header);
0367 
0368     if (message_size > hd->buffer_size_max) {
0369         dev_warn(&hd->dev, "requested message size too big (%zu > %zu)\n",
0370              message_size, hd->buffer_size_max);
0371         return NULL;
0372     }
0373 
0374     /* Allocate the message structure and buffer. */
0375     message = kmem_cache_zalloc(gb_message_cache, gfp_flags);
0376     if (!message)
0377         return NULL;
0378 
0379     message->buffer = kzalloc(message_size, gfp_flags);
0380     if (!message->buffer)
0381         goto err_free_message;
0382 
0383     /* Initialize the message.  Operation id is filled in later. */
0384     gb_operation_message_init(hd, message, 0, payload_size, type);
0385 
0386     return message;
0387 
0388 err_free_message:
0389     kmem_cache_free(gb_message_cache, message);
0390 
0391     return NULL;
0392 }
0393 
0394 static void gb_operation_message_free(struct gb_message *message)
0395 {
0396     kfree(message->buffer);
0397     kmem_cache_free(gb_message_cache, message);
0398 }
0399 
0400 /*
0401  * Map an enum gb_operation_status value (which is represented in a
0402  * message as a single byte) to an appropriate Linux negative errno.
0403  */
0404 static int gb_operation_status_map(u8 status)
0405 {
0406     switch (status) {
0407     case GB_OP_SUCCESS:
0408         return 0;
0409     case GB_OP_INTERRUPTED:
0410         return -EINTR;
0411     case GB_OP_TIMEOUT:
0412         return -ETIMEDOUT;
0413     case GB_OP_NO_MEMORY:
0414         return -ENOMEM;
0415     case GB_OP_PROTOCOL_BAD:
0416         return -EPROTONOSUPPORT;
0417     case GB_OP_OVERFLOW:
0418         return -EMSGSIZE;
0419     case GB_OP_INVALID:
0420         return -EINVAL;
0421     case GB_OP_RETRY:
0422         return -EAGAIN;
0423     case GB_OP_NONEXISTENT:
0424         return -ENODEV;
0425     case GB_OP_MALFUNCTION:
0426         return -EILSEQ;
0427     case GB_OP_UNKNOWN_ERROR:
0428     default:
0429         return -EIO;
0430     }
0431 }
0432 
0433 /*
0434  * Map a Linux errno value (from operation->errno) into the value
0435  * that should represent it in a response message status sent
0436  * over the wire.  Returns an enum gb_operation_status value (which
0437  * is represented in a message as a single byte).
0438  */
0439 static u8 gb_operation_errno_map(int errno)
0440 {
0441     switch (errno) {
0442     case 0:
0443         return GB_OP_SUCCESS;
0444     case -EINTR:
0445         return GB_OP_INTERRUPTED;
0446     case -ETIMEDOUT:
0447         return GB_OP_TIMEOUT;
0448     case -ENOMEM:
0449         return GB_OP_NO_MEMORY;
0450     case -EPROTONOSUPPORT:
0451         return GB_OP_PROTOCOL_BAD;
0452     case -EMSGSIZE:
0453         return GB_OP_OVERFLOW;  /* Could be underflow too */
0454     case -EINVAL:
0455         return GB_OP_INVALID;
0456     case -EAGAIN:
0457         return GB_OP_RETRY;
0458     case -EILSEQ:
0459         return GB_OP_MALFUNCTION;
0460     case -ENODEV:
0461         return GB_OP_NONEXISTENT;
0462     case -EIO:
0463     default:
0464         return GB_OP_UNKNOWN_ERROR;
0465     }
0466 }
0467 
0468 bool gb_operation_response_alloc(struct gb_operation *operation,
0469                  size_t response_size, gfp_t gfp)
0470 {
0471     struct gb_host_device *hd = operation->connection->hd;
0472     struct gb_operation_msg_hdr *request_header;
0473     struct gb_message *response;
0474     u8 type;
0475 
0476     type = operation->type | GB_MESSAGE_TYPE_RESPONSE;
0477     response = gb_operation_message_alloc(hd, type, response_size, gfp);
0478     if (!response)
0479         return false;
0480     response->operation = operation;
0481 
0482     /*
0483      * Size and type get initialized when the message is
0484      * allocated.  The errno will be set before sending.  All
0485      * that's left is the operation id, which we copy from the
0486      * request message header (as-is, in little-endian order).
0487      */
0488     request_header = operation->request->header;
0489     response->header->operation_id = request_header->operation_id;
0490     operation->response = response;
0491 
0492     return true;
0493 }
0494 EXPORT_SYMBOL_GPL(gb_operation_response_alloc);
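
/*
 * A minimal sketch of an incoming-request handler using
 * gb_operation_response_alloc().  The gb_example_* names and payload
 * structures are hypothetical, not part of this driver:
 *
 *	static int gb_example_request_handler(struct gb_operation *op)
 *	{
 *		struct gb_example_request *request = op->request->payload;
 *		struct gb_example_response *response;
 *
 *		if (op->request->payload_size < sizeof(*request))
 *			return -EINVAL;
 *
 *		if (!gb_operation_response_alloc(op, sizeof(*response),
 *						 GFP_KERNEL))
 *			return -ENOMEM;
 *
 *		response = op->response->payload;
 *		response->value = cpu_to_le32(gb_example_compute(request));
 *
 *		return 0;
 *	}
 *
 * The handler's return value becomes the operation status that
 * gb_operation_request_handle() passes to gb_operation_response_send();
 * a handler with no response payload can skip the allocation entirely.
 */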
0495 
0496 /*
0497  * Create a Greybus operation to be sent over the given connection.
0498  * The request buffer will be big enough for a payload of the given
0499  * size.
0500  *
0501  * For outgoing requests, the request message's header will be
0502  * initialized with the type of the request and the message size.
0503  * Outgoing operations must also specify the response buffer size,
0504  * which must be sufficient to hold all expected response data.  The
0505  * response message header will eventually be overwritten, so there's
0506  * no need to initialize it here.
0507  *
0508  * Request messages for incoming operations can arrive in interrupt
0509  * context, so they must be allocated with GFP_ATOMIC.  In this case
0510  * the request buffer will be immediately overwritten, so there is
0511  * no need to initialize the message header.  Responsibility for
0512  * allocating a response buffer lies with the incoming request
0513  * handler for a protocol.  So we don't allocate that here.
0514  *
0515  * Returns a pointer to the new operation or a null pointer if an
0516  * error occurs.
0517  */
0518 static struct gb_operation *
0519 gb_operation_create_common(struct gb_connection *connection, u8 type,
0520                size_t request_size, size_t response_size,
0521                unsigned long op_flags, gfp_t gfp_flags)
0522 {
0523     struct gb_host_device *hd = connection->hd;
0524     struct gb_operation *operation;
0525 
0526     operation = kmem_cache_zalloc(gb_operation_cache, gfp_flags);
0527     if (!operation)
0528         return NULL;
0529     operation->connection = connection;
0530 
0531     operation->request = gb_operation_message_alloc(hd, type, request_size,
0532                             gfp_flags);
0533     if (!operation->request)
0534         goto err_cache;
0535     operation->request->operation = operation;
0536 
0537     /* Allocate the response buffer for outgoing operations */
0538     if (!(op_flags & GB_OPERATION_FLAG_INCOMING)) {
0539         if (!gb_operation_response_alloc(operation, response_size,
0540                          gfp_flags)) {
0541             goto err_request;
0542         }
0543 
0544         timer_setup(&operation->timer, gb_operation_timeout, 0);
0545     }
0546 
0547     operation->flags = op_flags;
0548     operation->type = type;
0549     operation->errno = -EBADR;  /* Initial value--means "never set" */
0550 
0551     INIT_WORK(&operation->work, gb_operation_work);
0552     init_completion(&operation->completion);
0553     kref_init(&operation->kref);
0554     atomic_set(&operation->waiters, 0);
0555 
0556     return operation;
0557 
0558 err_request:
0559     gb_operation_message_free(operation->request);
0560 err_cache:
0561     kmem_cache_free(gb_operation_cache, operation);
0562 
0563     return NULL;
0564 }
0565 
0566 /*
0567  * Create a new operation associated with the given connection.  The
0568  * request and response sizes provided are the number of bytes
0569  * required to hold the request/response payload only.  Both of
0570  * these are allowed to be 0.  Note that 0x00 is reserved as an
0571  * invalid operation type for all protocols, and this is enforced
0572  * here.
0573  */
0574 struct gb_operation *
0575 gb_operation_create_flags(struct gb_connection *connection,
0576               u8 type, size_t request_size,
0577               size_t response_size, unsigned long flags,
0578               gfp_t gfp)
0579 {
0580     struct gb_operation *operation;
0581 
0582     if (WARN_ON_ONCE(type == GB_REQUEST_TYPE_INVALID))
0583         return NULL;
0584     if (WARN_ON_ONCE(type & GB_MESSAGE_TYPE_RESPONSE))
0585         type &= ~GB_MESSAGE_TYPE_RESPONSE;
0586 
0587     if (WARN_ON_ONCE(flags & ~GB_OPERATION_FLAG_USER_MASK))
0588         flags &= GB_OPERATION_FLAG_USER_MASK;
0589 
0590     operation = gb_operation_create_common(connection, type,
0591                            request_size, response_size,
0592                            flags, gfp);
0593     if (operation)
0594         trace_gb_operation_create(operation);
0595 
0596     return operation;
0597 }
0598 EXPORT_SYMBOL_GPL(gb_operation_create_flags);
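
/*
 * A minimal sketch (hypothetical type and sizes) of creating an operation
 * whose response may legitimately be shorter than the allocated buffer by
 * passing GB_OPERATION_FLAG_SHORT_RESPONSE, one of the user-settable flags:
 *
 *	operation = gb_operation_create_flags(connection, 0x02,
 *					      sizeof(struct gb_example_request),
 *					      GB_EXAMPLE_RESPONSE_SIZE_MAX,
 *					      GB_OPERATION_FLAG_SHORT_RESPONSE,
 *					      GFP_KERNEL);
 *
 * When a short response arrives for such an operation,
 * gb_connection_recv_response() below trims the response message's
 * payload_size to the number of bytes actually received instead of
 * failing the operation with -EMSGSIZE.
 */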
0599 
0600 struct gb_operation *
0601 gb_operation_create_core(struct gb_connection *connection,
0602              u8 type, size_t request_size,
0603              size_t response_size, unsigned long flags,
0604              gfp_t gfp)
0605 {
0606     struct gb_operation *operation;
0607 
0608     flags |= GB_OPERATION_FLAG_CORE;
0609 
0610     operation = gb_operation_create_common(connection, type,
0611                            request_size, response_size,
0612                            flags, gfp);
0613     if (operation)
0614         trace_gb_operation_create_core(operation);
0615 
0616     return operation;
0617 }
0618 
0619 /* Do not export gb_operation_create_core() above; it is for core use only. */
0620 
0621 size_t gb_operation_get_payload_size_max(struct gb_connection *connection)
0622 {
0623     struct gb_host_device *hd = connection->hd;
0624 
0625     return hd->buffer_size_max - sizeof(struct gb_operation_msg_hdr);
0626 }
0627 EXPORT_SYMBOL_GPL(gb_operation_get_payload_size_max);
0628 
0629 static struct gb_operation *
0630 gb_operation_create_incoming(struct gb_connection *connection, u16 id,
0631                  u8 type, void *data, size_t size)
0632 {
0633     struct gb_operation *operation;
0634     size_t request_size;
0635     unsigned long flags = GB_OPERATION_FLAG_INCOMING;
0636 
0637     /* Caller has made sure we at least have a message header. */
0638     request_size = size - sizeof(struct gb_operation_msg_hdr);
0639 
0640     if (!id)
0641         flags |= GB_OPERATION_FLAG_UNIDIRECTIONAL;
0642 
0643     operation = gb_operation_create_common(connection, type,
0644                            request_size,
0645                            GB_REQUEST_TYPE_INVALID,
0646                            flags, GFP_ATOMIC);
0647     if (!operation)
0648         return NULL;
0649 
0650     operation->id = id;
0651     memcpy(operation->request->header, data, size);
0652     trace_gb_operation_create_incoming(operation);
0653 
0654     return operation;
0655 }
0656 
0657 /*
0658  * Get an additional reference on an operation.
0659  */
0660 void gb_operation_get(struct gb_operation *operation)
0661 {
0662     kref_get(&operation->kref);
0663 }
0664 EXPORT_SYMBOL_GPL(gb_operation_get);
0665 
0666 /*
0667  * Destroy a previously created operation.
0668  */
0669 static void _gb_operation_destroy(struct kref *kref)
0670 {
0671     struct gb_operation *operation;
0672 
0673     operation = container_of(kref, struct gb_operation, kref);
0674 
0675     trace_gb_operation_destroy(operation);
0676 
0677     if (operation->response)
0678         gb_operation_message_free(operation->response);
0679     gb_operation_message_free(operation->request);
0680 
0681     kmem_cache_free(gb_operation_cache, operation);
0682 }
0683 
0684 /*
0685  * Drop a reference on an operation, and destroy it when the last
0686  * one is gone.
0687  */
0688 void gb_operation_put(struct gb_operation *operation)
0689 {
0690     if (WARN_ON(!operation))
0691         return;
0692 
0693     kref_put(&operation->kref, _gb_operation_destroy);
0694 }
0695 EXPORT_SYMBOL_GPL(gb_operation_put);
0696 
0697 /* Tell the requester we're done */
0698 static void gb_operation_sync_callback(struct gb_operation *operation)
0699 {
0700     complete(&operation->completion);
0701 }
0702 
0703 /**
0704  * gb_operation_request_send() - send an operation request message
0705  * @operation:  the operation to initiate
0706  * @callback:   the operation completion callback
0707  * @timeout:    operation timeout in milliseconds, or zero for no timeout
0708  * @gfp:    the memory flags to use for any allocations
0709  *
0710  * The caller has filled in any payload so the request message is ready to go.
0711  * The callback function supplied will be called when the response message has
0712  * arrived, a unidirectional request has been sent, or the operation is
0713  * cancelled, indicating that the operation is complete. The callback function
0714  * can fetch the result of the operation using gb_operation_result() if
0715  * desired.
0716  *
0717  * Return: 0 if the request was successfully queued in the host-driver queues,
0718  * or a negative errno.
0719  */
0720 int gb_operation_request_send(struct gb_operation *operation,
0721                   gb_operation_callback callback,
0722                   unsigned int timeout,
0723                   gfp_t gfp)
0724 {
0725     struct gb_connection *connection = operation->connection;
0726     struct gb_operation_msg_hdr *header;
0727     unsigned int cycle;
0728     int ret;
0729 
0730     if (gb_connection_is_offloaded(connection))
0731         return -EBUSY;
0732 
0733     if (!callback)
0734         return -EINVAL;
0735 
0736     /*
0737      * Record the callback function, which is executed in
0738      * non-atomic (workqueue) context when the final result
0739      * of an operation has been set.
0740      */
0741     operation->callback = callback;
0742 
0743     /*
0744      * Assign the operation's id, and store it in the request header.
0745      * Zero is a reserved operation id for unidirectional operations.
0746      */
0747     if (gb_operation_is_unidirectional(operation)) {
0748         operation->id = 0;
0749     } else {
0750         cycle = (unsigned int)atomic_inc_return(&connection->op_cycle);
0751         operation->id = (u16)(cycle % U16_MAX + 1);
0752     }
0753 
0754     header = operation->request->header;
0755     header->operation_id = cpu_to_le16(operation->id);
0756 
0757     gb_operation_result_set(operation, -EINPROGRESS);
0758 
0759     /*
0760      * Get an extra reference on the operation. It'll be dropped when the
0761      * operation completes.
0762      */
0763     gb_operation_get(operation);
0764     ret = gb_operation_get_active(operation);
0765     if (ret)
0766         goto err_put;
0767 
0768     ret = gb_message_send(operation->request, gfp);
0769     if (ret)
0770         goto err_put_active;
0771 
0772     if (timeout) {
0773         operation->timer.expires = jiffies + msecs_to_jiffies(timeout);
0774         add_timer(&operation->timer);
0775     }
0776 
0777     return 0;
0778 
0779 err_put_active:
0780     gb_operation_put_active(operation);
0781 err_put:
0782     gb_operation_put(operation);
0783 
0784     return ret;
0785 }
0786 EXPORT_SYMBOL_GPL(gb_operation_request_send);
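
/*
 * A minimal sketch of asynchronous use of gb_operation_request_send().
 * The gb_example_* names, type value and timeout are hypothetical, and
 * error handling is abbreviated:
 *
 *	static void gb_example_callback(struct gb_operation *op)
 *	{
 *		if (!gb_operation_result(op))
 *			gb_example_consume(op->response->payload,
 *					   op->response->payload_size);
 *
 *		gb_operation_put(op);	// drop the requester's reference
 *	}
 *
 *	op = gb_operation_create(connection, 0x03, request_size,
 *				 response_size, GFP_KERNEL);
 *	if (!op)
 *		return -ENOMEM;
 *
 *	memcpy(op->request->payload, request, request_size);
 *
 *	ret = gb_operation_request_send(op, gb_example_callback, 1000,
 *					GFP_KERNEL);
 *	if (ret)
 *		gb_operation_put(op);
 *
 * The callback runs in workqueue context once the final result has been
 * set (response received, send error, timeout or cancellation) and is
 * responsible for dropping the requester's reference.
 */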
0787 
0788 /*
0789  * Send a synchronous operation.  This function is expected to
0790  * block, returning only when the response has arrived (or when an
0791  * error is detected).  The return value is the result of the
0792  * operation.
0793  */
0794 int gb_operation_request_send_sync_timeout(struct gb_operation *operation,
0795                        unsigned int timeout)
0796 {
0797     int ret;
0798 
0799     ret = gb_operation_request_send(operation, gb_operation_sync_callback,
0800                     timeout, GFP_KERNEL);
0801     if (ret)
0802         return ret;
0803 
0804     ret = wait_for_completion_interruptible(&operation->completion);
0805     if (ret < 0) {
0806         /* Cancel the operation if interrupted */
0807         gb_operation_cancel(operation, -ECANCELED);
0808     }
0809 
0810     return gb_operation_result(operation);
0811 }
0812 EXPORT_SYMBOL_GPL(gb_operation_request_send_sync_timeout);
0813 
0814 /*
0815  * Send a response for an incoming operation request.  A non-zero
0816  * errno indicates a failed operation.
0817  *
0818  * If there is any response payload, the incoming request handler is
0819  * responsible for allocating the response message.  Otherwise it
0820  * can simply supply the result errno; this function will
0821  * allocate the response message if necessary.
0822  */
0823 static int gb_operation_response_send(struct gb_operation *operation,
0824                       int errno)
0825 {
0826     struct gb_connection *connection = operation->connection;
0827     int ret;
0828 
0829     if (!operation->response &&
0830         !gb_operation_is_unidirectional(operation)) {
0831         if (!gb_operation_response_alloc(operation, 0, GFP_KERNEL))
0832             return -ENOMEM;
0833     }
0834 
0835     /* Record the result */
0836     if (!gb_operation_result_set(operation, errno)) {
0837         dev_err(&connection->hd->dev, "request result already set\n");
0838         return -EIO;    /* Shouldn't happen */
0839     }
0840 
0841     /* Sender of request does not care about response. */
0842     if (gb_operation_is_unidirectional(operation))
0843         return 0;
0844 
0845     /* Reference will be dropped when message has been sent. */
0846     gb_operation_get(operation);
0847     ret = gb_operation_get_active(operation);
0848     if (ret)
0849         goto err_put;
0850 
0851     /* Fill in the response header and send it */
0852     operation->response->header->result = gb_operation_errno_map(errno);
0853 
0854     ret = gb_message_send(operation->response, GFP_KERNEL);
0855     if (ret)
0856         goto err_put_active;
0857 
0858     return 0;
0859 
0860 err_put_active:
0861     gb_operation_put_active(operation);
0862 err_put:
0863     gb_operation_put(operation);
0864 
0865     return ret;
0866 }
0867 
0868 /*
0869  * This function is called when a message send request has completed.
0870  */
0871 void greybus_message_sent(struct gb_host_device *hd,
0872               struct gb_message *message, int status)
0873 {
0874     struct gb_operation *operation = message->operation;
0875     struct gb_connection *connection = operation->connection;
0876 
0877     /*
0878      * If the message was a response, we just need to drop our
0879      * reference to the operation.  If an error occurred, report
0880      * it.
0881      *
0882      * For requests, if there's no error and the operation is not
0883      * unidirectional, there's nothing more to do until the response
0884      * arrives. If an error occurred attempting to send it, or if the
0885      * operation is unidirectional, record the result of the operation and
0886      * schedule its completion.
0887      */
0888     if (message == operation->response) {
0889         if (status) {
0890             dev_err(&connection->hd->dev,
0891                 "%s: error sending response 0x%02x: %d\n",
0892                 connection->name, operation->type, status);
0893         }
0894 
0895         gb_operation_put_active(operation);
0896         gb_operation_put(operation);
0897     } else if (status || gb_operation_is_unidirectional(operation)) {
0898         if (gb_operation_result_set(operation, status)) {
0899             queue_work(gb_operation_completion_wq,
0900                    &operation->work);
0901         }
0902     }
0903 }
0904 EXPORT_SYMBOL_GPL(greybus_message_sent);
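
/*
 * A minimal sketch of the host-driver half of this contract.  The
 * gb_example_* names are hypothetical: a host device driver queues the
 * buffer from its message_send callback and, when the transfer finishes
 * (or fails), reports the outcome so the operation core can complete or
 * release the operation:
 *
 *	static void gb_example_hd_tx_complete(struct gb_example_hd *ehd,
 *					      struct gb_message *message,
 *					      int status)
 *	{
 *		greybus_message_sent(ehd->hd, message, status);
 *	}
 */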
0905 
0906 /*
0907  * We've received data on a connection, and it doesn't look like a
0908  * response, so we assume it's a request.
0909  *
0910  * This is called in interrupt context, so just copy the incoming
0911  * data into the request buffer and handle the rest via workqueue.
0912  */
0913 static void gb_connection_recv_request(struct gb_connection *connection,
0914                 const struct gb_operation_msg_hdr *header,
0915                 void *data, size_t size)
0916 {
0917     struct gb_operation *operation;
0918     u16 operation_id;
0919     u8 type;
0920     int ret;
0921 
0922     operation_id = le16_to_cpu(header->operation_id);
0923     type = header->type;
0924 
0925     operation = gb_operation_create_incoming(connection, operation_id,
0926                          type, data, size);
0927     if (!operation) {
0928         dev_err(&connection->hd->dev,
0929             "%s: can't create incoming operation\n",
0930             connection->name);
0931         return;
0932     }
0933 
0934     ret = gb_operation_get_active(operation);
0935     if (ret) {
0936         gb_operation_put(operation);
0937         return;
0938     }
0939     trace_gb_message_recv_request(operation->request);
0940 
0941     /*
0942      * The initial reference to the operation will be dropped when the
0943      * request handler returns.
0944      */
0945     if (gb_operation_result_set(operation, -EINPROGRESS))
0946         queue_work(connection->wq, &operation->work);
0947 }
0948 
0949 /*
0950  * We've received data that appears to be an operation response
0951  * message.  Look up the operation, and record that we've received
0952  * its response.
0953  *
0954  * This is called in interrupt context, so just copy the incoming
0955  * data into the response buffer and handle the rest via workqueue.
0956  */
0957 static void gb_connection_recv_response(struct gb_connection *connection,
0958                 const struct gb_operation_msg_hdr *header,
0959                 void *data, size_t size)
0960 {
0961     struct gb_operation *operation;
0962     struct gb_message *message;
0963     size_t message_size;
0964     u16 operation_id;
0965     int errno;
0966 
0967     operation_id = le16_to_cpu(header->operation_id);
0968 
0969     if (!operation_id) {
0970         dev_err_ratelimited(&connection->hd->dev,
0971                     "%s: invalid response id 0 received\n",
0972                     connection->name);
0973         return;
0974     }
0975 
0976     operation = gb_operation_find_outgoing(connection, operation_id);
0977     if (!operation) {
0978         dev_err_ratelimited(&connection->hd->dev,
0979                     "%s: unexpected response id 0x%04x received\n",
0980                     connection->name, operation_id);
0981         return;
0982     }
0983 
0984     errno = gb_operation_status_map(header->result);
0985     message = operation->response;
0986     message_size = sizeof(*header) + message->payload_size;
0987     if (!errno && size > message_size) {
0988         dev_err_ratelimited(&connection->hd->dev,
0989                     "%s: malformed response 0x%02x received (%zu > %zu)\n",
0990                     connection->name, header->type,
0991                     size, message_size);
0992         errno = -EMSGSIZE;
0993     } else if (!errno && size < message_size) {
0994         if (gb_operation_short_response_allowed(operation)) {
0995             message->payload_size = size - sizeof(*header);
0996         } else {
0997             dev_err_ratelimited(&connection->hd->dev,
0998                         "%s: short response 0x%02x received (%zu < %zu)\n",
0999                         connection->name, header->type,
1000                         size, message_size);
1001             errno = -EMSGSIZE;
1002         }
1003     }
1004 
1005     /* We must ignore the payload if a bad status is returned */
1006     if (errno)
1007         size = sizeof(*header);
1008 
1009     /* The rest will be handled in work queue context */
1010     if (gb_operation_result_set(operation, errno)) {
1011         memcpy(message->buffer, data, size);
1012 
1013         trace_gb_message_recv_response(message);
1014 
1015         queue_work(gb_operation_completion_wq, &operation->work);
1016     }
1017 
1018     gb_operation_put(operation);
1019 }
1020 
1021 /*
1022  * Handle data arriving on a connection.  As soon as we return, the
1023  * supplied data buffer will be reused (so unless we do something
1024  * with it, it's effectively dropped).
1025  */
1026 void gb_connection_recv(struct gb_connection *connection,
1027             void *data, size_t size)
1028 {
1029     struct gb_operation_msg_hdr header;
1030     struct device *dev = &connection->hd->dev;
1031     size_t msg_size;
1032 
1033     if (connection->state == GB_CONNECTION_STATE_DISABLED ||
1034         gb_connection_is_offloaded(connection)) {
1035         dev_warn_ratelimited(dev, "%s: dropping %zu received bytes\n",
1036                      connection->name, size);
1037         return;
1038     }
1039 
1040     if (size < sizeof(header)) {
1041         dev_err_ratelimited(dev, "%s: short message received\n",
1042                     connection->name);
1043         return;
1044     }
1045 
1046     /* Use memcpy as data may be unaligned */
1047     memcpy(&header, data, sizeof(header));
1048     msg_size = le16_to_cpu(header.size);
1049     if (size < msg_size) {
1050         dev_err_ratelimited(dev,
1051                     "%s: incomplete message 0x%04x of type 0x%02x received (%zu < %zu)\n",
1052                     connection->name,
1053                     le16_to_cpu(header.operation_id),
1054                     header.type, size, msg_size);
1055         return;     /* XXX Should still complete operation */
1056     }
1057 
1058     if (header.type & GB_MESSAGE_TYPE_RESPONSE) {
1059         gb_connection_recv_response(connection, &header, data,
1060                         msg_size);
1061     } else {
1062         gb_connection_recv_request(connection, &header, data,
1063                        msg_size);
1064     }
1065 }
1066 
1067 /*
1068  * Cancel an outgoing operation synchronously, and record the given error to
1069  * indicate why.
1070  */
1071 void gb_operation_cancel(struct gb_operation *operation, int errno)
1072 {
1073     if (WARN_ON(gb_operation_is_incoming(operation)))
1074         return;
1075 
1076     if (gb_operation_result_set(operation, errno)) {
1077         gb_message_cancel(operation->request);
1078         queue_work(gb_operation_completion_wq, &operation->work);
1079     }
1080     trace_gb_message_cancel_outgoing(operation->request);
1081 
1082     atomic_inc(&operation->waiters);
1083     wait_event(gb_operation_cancellation_queue,
1084            !gb_operation_is_active(operation));
1085     atomic_dec(&operation->waiters);
1086 }
1087 EXPORT_SYMBOL_GPL(gb_operation_cancel);
1088 
1089 /*
1090  * Cancel an incoming operation synchronously. Called during connection tear
1091  * down.
1092  */
1093 void gb_operation_cancel_incoming(struct gb_operation *operation, int errno)
1094 {
1095     if (WARN_ON(!gb_operation_is_incoming(operation)))
1096         return;
1097 
1098     if (!gb_operation_is_unidirectional(operation)) {
1099         /*
1100          * Make sure the request handler has submitted the response
1101          * before cancelling it.
1102          */
1103         flush_work(&operation->work);
1104         if (!gb_operation_result_set(operation, errno))
1105             gb_message_cancel(operation->response);
1106     }
1107     trace_gb_message_cancel_incoming(operation->response);
1108 
1109     atomic_inc(&operation->waiters);
1110     wait_event(gb_operation_cancellation_queue,
1111            !gb_operation_is_active(operation));
1112     atomic_dec(&operation->waiters);
1113 }
1114 
1115 /**
1116  * gb_operation_sync_timeout() - implement a "simple" synchronous operation
1117  * @connection: the Greybus connection to send this to
1118  * @type: the type of operation to send
1119  * @request: pointer to a memory buffer to copy the request from
1120  * @request_size: size of @request
1121  * @response: pointer to a memory buffer to copy the response to
1122  * @response_size: the size of @response.
1123  * @timeout: operation timeout in milliseconds
1124  *
1125  * This function implements a simple synchronous Greybus operation.  It sends
1126  * the provided operation request and waits (sleeps) until the corresponding
1127  * operation response message has been successfully received, or an error
1128  * occurs.  @request and @response are buffers to hold the request and response
1129  * data respectively, and if they are not NULL, their size must be specified in
1130  * @request_size and @response_size.
1131  *
1132  * If a response payload is to come back, and @response is not NULL,
1133  * @response_size number of bytes will be copied into @response if the operation
1134  * is successful.
1135  *
1136  * If there is an error, the response buffer is left alone.
1137  */
1138 int gb_operation_sync_timeout(struct gb_connection *connection, int type,
1139                   void *request, int request_size,
1140                   void *response, int response_size,
1141                   unsigned int timeout)
1142 {
1143     struct gb_operation *operation;
1144     int ret;
1145 
1146     if ((response_size && !response) ||
1147         (request_size && !request))
1148         return -EINVAL;
1149 
1150     operation = gb_operation_create(connection, type,
1151                     request_size, response_size,
1152                     GFP_KERNEL);
1153     if (!operation)
1154         return -ENOMEM;
1155 
1156     if (request_size)
1157         memcpy(operation->request->payload, request, request_size);
1158 
1159     ret = gb_operation_request_send_sync_timeout(operation, timeout);
1160     if (ret) {
1161         dev_err(&connection->hd->dev,
1162             "%s: synchronous operation id 0x%04x of type 0x%02x failed: %d\n",
1163             connection->name, operation->id, type, ret);
1164     } else {
1165         if (response_size) {
1166             memcpy(response, operation->response->payload,
1167                    response_size);
1168         }
1169     }
1170 
1171     gb_operation_put(operation);
1172 
1173     return ret;
1174 }
1175 EXPORT_SYMBOL_GPL(gb_operation_sync_timeout);
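
/*
 * A minimal sketch of the common synchronous case, using the
 * gb_operation_sync() wrapper (gb_operation_sync_timeout() with the
 * default timeout).  The payload structures and type value are
 * hypothetical:
 *
 *	struct gb_example_version_request request;
 *	struct gb_example_version_response response;
 *	int ret;
 *
 *	request.major = 1;
 *	request.minor = 0;
 *
 *	ret = gb_operation_sync(connection, 0x01,
 *				&request, sizeof(request),
 *				&response, sizeof(response));
 *	if (ret)
 *		return ret;
 *
 *	dev_dbg(&connection->hd->dev, "version %u.%u\n",
 *		response.major, response.minor);
 */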
1176 
1177 /**
1178  * gb_operation_unidirectional_timeout() - initiate a unidirectional operation
1179  * @connection:     connection to use
1180  * @type:       type of operation to send
1181  * @request:        memory buffer to copy the request from
1182  * @request_size:   size of @request
1183  * @timeout:        send timeout in milliseconds
1184  *
1185  * Initiate a unidirectional operation by sending a request message and
1186  * waiting for it to be acknowledged as sent by the host device.
1187  *
1188  * Note that successful send of a unidirectional operation does not imply that
1189  * the request has actually reached the remote end of the connection.
1190  */
1191 int gb_operation_unidirectional_timeout(struct gb_connection *connection,
1192                     int type, void *request,
1193                     int request_size,
1194                     unsigned int timeout)
1195 {
1196     struct gb_operation *operation;
1197     int ret;
1198 
1199     if (request_size && !request)
1200         return -EINVAL;
1201 
1202     operation = gb_operation_create_flags(connection, type,
1203                           request_size, 0,
1204                           GB_OPERATION_FLAG_UNIDIRECTIONAL,
1205                           GFP_KERNEL);
1206     if (!operation)
1207         return -ENOMEM;
1208 
1209     if (request_size)
1210         memcpy(operation->request->payload, request, request_size);
1211 
1212     ret = gb_operation_request_send_sync_timeout(operation, timeout);
1213     if (ret) {
1214         dev_err(&connection->hd->dev,
1215             "%s: unidirectional operation of type 0x%02x failed: %d\n",
1216             connection->name, type, ret);
1217     }
1218 
1219     gb_operation_put(operation);
1220 
1221     return ret;
1222 }
1223 EXPORT_SYMBOL_GPL(gb_operation_unidirectional_timeout);
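
/*
 * A minimal sketch of a fire-and-forget request using the
 * gb_operation_unidirectional() wrapper, which supplies the default
 * timeout.  The payload structure and type value are hypothetical:
 *
 *	struct gb_example_event event = { .id = cpu_to_le16(42) };
 *
 *	ret = gb_operation_unidirectional(connection, 0x04,
 *					  &event, sizeof(event));
 *
 * A zero return only means the host device reported the message as sent;
 * as noted above, no response is expected and none is waited for.
 */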
1224 
1225 int __init gb_operation_init(void)
1226 {
1227     gb_message_cache = kmem_cache_create("gb_message_cache",
1228                          sizeof(struct gb_message), 0, 0,
1229                          NULL);
1230     if (!gb_message_cache)
1231         return -ENOMEM;
1232 
1233     gb_operation_cache = kmem_cache_create("gb_operation_cache",
1234                            sizeof(struct gb_operation), 0,
1235                            0, NULL);
1236     if (!gb_operation_cache)
1237         goto err_destroy_message_cache;
1238 
1239     gb_operation_completion_wq = alloc_workqueue("greybus_completion",
1240                              0, 0);
1241     if (!gb_operation_completion_wq)
1242         goto err_destroy_operation_cache;
1243 
1244     return 0;
1245 
1246 err_destroy_operation_cache:
1247     kmem_cache_destroy(gb_operation_cache);
1248     gb_operation_cache = NULL;
1249 err_destroy_message_cache:
1250     kmem_cache_destroy(gb_message_cache);
1251     gb_message_cache = NULL;
1252 
1253     return -ENOMEM;
1254 }
1255 
1256 void gb_operation_exit(void)
1257 {
1258     destroy_workqueue(gb_operation_completion_wq);
1259     gb_operation_completion_wq = NULL;
1260     kmem_cache_destroy(gb_operation_cache);
1261     gb_operation_cache = NULL;
1262     kmem_cache_destroy(gb_message_cache);
1263     gb_message_cache = NULL;
1264 }