Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Copyright(C) 2016 Linaro Limited. All rights reserved.
0004  * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
0005  */
0006 
0007 #include <linux/atomic.h>
0008 #include <linux/circ_buf.h>
0009 #include <linux/coresight.h>
0010 #include <linux/perf_event.h>
0011 #include <linux/slab.h>
0012 #include "coresight-priv.h"
0013 #include "coresight-tmc.h"
0014 #include "coresight-etm-perf.h"
0015 
0016 static int tmc_set_etf_buffer(struct coresight_device *csdev,
0017                   struct perf_output_handle *handle);
0018 
/*
 * Program the TMC for circular-buffer (ETB) capture and start it.
 * Runs with the device's CoreSight registers unlocked for the duration
 * of the sequence (CS_UNLOCK()/CS_LOCK() pair).  Callers hold
 * drvdata->spinlock (see tmc_etb_enable_hw() call sites below).
 */
static void __tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
    CS_UNLOCK(drvdata->base);

    /* Wait for TMCSReady bit to be set */
    tmc_wait_for_tmcready(drvdata);

    /* Capture into the TMC's internal RAM as a circular buffer */
    writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
    /* Enable formatting, trigger insertion and flush/stop on trigger */
    writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
               TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
               TMC_FFCR_TRIGON_TRIGIN,
               drvdata->base + TMC_FFCR);

    /* Trigger counter programmed from the sysFS-exposed value */
    writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
    tmc_enable_hw(drvdata);

    CS_LOCK(drvdata->base);
}
0037 
0038 static int tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
0039 {
0040     int rc = coresight_claim_device(drvdata->csdev);
0041 
0042     if (rc)
0043         return rc;
0044 
0045     __tmc_etb_enable_hw(drvdata);
0046     return 0;
0047 }
0048 
0049 static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
0050 {
0051     char *bufp;
0052     u32 read_data, lost;
0053 
0054     /* Check if the buffer wrapped around. */
0055     lost = readl_relaxed(drvdata->base + TMC_STS) & TMC_STS_FULL;
0056     bufp = drvdata->buf;
0057     drvdata->len = 0;
0058     while (1) {
0059         read_data = readl_relaxed(drvdata->base + TMC_RRD);
0060         if (read_data == 0xFFFFFFFF)
0061             break;
0062         memcpy(bufp, &read_data, 4);
0063         bufp += 4;
0064         drvdata->len += 4;
0065     }
0066 
0067     if (lost)
0068         coresight_insert_barrier_packet(drvdata->buf);
0069     return;
0070 }
0071 
/*
 * Flush and stop the TMC.  When operated from sysFS the internal RAM is
 * drained into drvdata->buf first; the perf path instead copies the data
 * out separately in tmc_update_etf_buffer().
 */
static void __tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
    CS_UNLOCK(drvdata->base);

    tmc_flush_and_stop(drvdata);
    /*
     * When operating in sysFS mode the content of the buffer needs to be
     * read before the TMC is disabled.
     */
    if (drvdata->mode == CS_MODE_SYSFS)
        tmc_etb_dump_hw(drvdata);
    tmc_disable_hw(drvdata);

    CS_LOCK(drvdata->base);
}
0087 
0088 static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
0089 {
0090     __tmc_etb_disable_hw(drvdata);
0091     coresight_disclaim_device(drvdata->csdev);
0092 }
0093 
/*
 * Program the TMC for hardware-FIFO operation and start it.  This is the
 * link (ETF) configuration — see tmc_enable_etf_link() — as opposed to
 * the circular-buffer sink mode set up by __tmc_etb_enable_hw().
 */
static void __tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
    CS_UNLOCK(drvdata->base);

    /* Wait for TMCSReady bit to be set */
    tmc_wait_for_tmcready(drvdata);

    writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE);
    /* Formatting and trigger insertion only; no flush-on-trigger here */
    writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI,
               drvdata->base + TMC_FFCR);
    /* Zero the buffer-level watermark */
    writel_relaxed(0x0, drvdata->base + TMC_BUFWM);
    tmc_enable_hw(drvdata);

    CS_LOCK(drvdata->base);
}
0109 
0110 static int tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
0111 {
0112     int rc = coresight_claim_device(drvdata->csdev);
0113 
0114     if (rc)
0115         return rc;
0116 
0117     __tmc_etf_enable_hw(drvdata);
0118     return 0;
0119 }
0120 
/*
 * Flush and stop the TMC operating as a FIFO link, then drop our claim.
 * The _unlocked disclaim variant is used because the device's CoreSight
 * registers are still unlocked (between CS_UNLOCK() and CS_LOCK()) at
 * the point of the call.
 */
static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
{
    struct coresight_device *csdev = drvdata->csdev;

    CS_UNLOCK(drvdata->base);

    tmc_flush_and_stop(drvdata);
    tmc_disable_hw(drvdata);
    coresight_disclaim_device_unlocked(csdev);
    CS_LOCK(drvdata->base);
}
0132 
/*
 * Return the available trace data in the buffer from @pos, with
 * a maximum limit of @len, updating the @bufpp on where to
 * find it.
 *
 * Returns the number of bytes available (<= @len); a value <= 0 means
 * nothing to read at @pos, in which case *bufpp is left untouched.
 * Caller is expected to hold drvdata->spinlock while drvdata->buf and
 * drvdata->len are consumed.
 */
ssize_t tmc_etb_get_sysfs_trace(struct tmc_drvdata *drvdata,
                loff_t pos, size_t len, char **bufpp)
{
    ssize_t actual = len;

    /* Adjust the len to available size @pos */
    if (pos + actual > drvdata->len)
        actual = drvdata->len - pos;
    if (actual > 0)
        *bufpp = drvdata->buf + pos;
    return actual;
}
0150 
/*
 * Enable the ETB/ETF for use as a sysFS-controlled sink.
 *
 * drvdata->buf is allocated lazily: when absent, the spinlock is dropped
 * for the allocation and re-taken before the state is (re)checked.  If
 * the sink is already running in sysFS mode only the reference count is
 * bumped.  Returns 0 on success, -ENOMEM if the trace buffer can't be
 * allocated, -EBUSY if the device is currently being read.
 */
static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
{
    int ret = 0;
    bool used = false;
    char *buf = NULL;
    unsigned long flags;
    struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

    /*
     * If we don't have a buffer release the lock and allocate memory.
     * Otherwise keep the lock and move along.
     */
    spin_lock_irqsave(&drvdata->spinlock, flags);
    if (!drvdata->buf) {
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        /* Allocating the memory here while outside of the spinlock */
        buf = kzalloc(drvdata->size, GFP_KERNEL);
        if (!buf)
            return -ENOMEM;

        /* Let's try again */
        spin_lock_irqsave(&drvdata->spinlock, flags);
    }

    if (drvdata->reading) {
        ret = -EBUSY;
        goto out;
    }

    /*
     * In sysFS mode we can have multiple writers per sink.  Since this
     * sink is already enabled no memory is needed and the HW need not be
     * touched.
     */
    if (drvdata->mode == CS_MODE_SYSFS) {
        atomic_inc(csdev->refcnt);
        goto out;
    }

    /*
     * If drvdata::buf isn't NULL, memory was allocated for a previous
     * trace run but wasn't read.  If so simply zero-out the memory.
     * Otherwise use the memory allocated above.
     *
     * The memory is freed when users read the buffer using the
     * /dev/xyz.{etf|etb} interface.  See tmc_read_unprepare_etf() for
     * details.
     */
    if (drvdata->buf) {
        memset(drvdata->buf, 0, drvdata->size);
    } else {
        used = true;
        drvdata->buf = buf;
    }

    ret = tmc_etb_enable_hw(drvdata);
    if (!ret) {
        drvdata->mode = CS_MODE_SYSFS;
        atomic_inc(csdev->refcnt);
    } else {
        /* Free up the buffer if we failed to enable */
        used = false;
    }
out:
    spin_unlock_irqrestore(&drvdata->spinlock, flags);

    /* Free memory outside the spinlock if need be */
    if (!used)
        kfree(buf);

    return ret;
}
0224 
/*
 * Enable the ETB/ETF for use as a perf-mode sink.
 *
 * A perf-mode sink is owned by a single process (drvdata->pid); further
 * sessions from the same process share the already-programmed hardware
 * and just take a reference.  Returns 0 on success, -EBUSY if the sink
 * is driven from sysFS, owned by a different process, or being read,
 * -EINVAL on invalid state or buffer.
 */
static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data)
{
    int ret = 0;
    pid_t pid;
    unsigned long flags;
    struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
    struct perf_output_handle *handle = data;
    struct cs_buffers *buf = etm_perf_sink_config(handle);

    spin_lock_irqsave(&drvdata->spinlock, flags);
    /* do { } while (0) gives a single break-out path to the unlock */
    do {
        ret = -EINVAL;
        if (drvdata->reading)
            break;
        /*
         * No need to continue if the ETB/ETF is already operated
         * from sysFS.
         */
        if (drvdata->mode == CS_MODE_SYSFS) {
            ret = -EBUSY;
            break;
        }

        /* Get a handle on the pid of the process to monitor */
        pid = buf->pid;

        /* Sink already in use by another session's process */
        if (drvdata->pid != -1 && drvdata->pid != pid) {
            ret = -EBUSY;
            break;
        }

        ret = tmc_set_etf_buffer(csdev, handle);
        if (ret)
            break;

        /*
         * No HW configuration is needed if the sink is already in
         * use for this session.
         */
        if (drvdata->pid == pid) {
            atomic_inc(csdev->refcnt);
            break;
        }

        ret  = tmc_etb_enable_hw(drvdata);
        if (!ret) {
            /* Associate with monitored process. */
            drvdata->pid = pid;
            drvdata->mode = CS_MODE_PERF;
            atomic_inc(csdev->refcnt);
        }
    } while (0);
    spin_unlock_irqrestore(&drvdata->spinlock, flags);

    return ret;
}
0281 
0282 static int tmc_enable_etf_sink(struct coresight_device *csdev,
0283                    u32 mode, void *data)
0284 {
0285     int ret;
0286 
0287     switch (mode) {
0288     case CS_MODE_SYSFS:
0289         ret = tmc_enable_etf_sink_sysfs(csdev);
0290         break;
0291     case CS_MODE_PERF:
0292         ret = tmc_enable_etf_sink_perf(csdev, data);
0293         break;
0294     /* We shouldn't be here */
0295     default:
0296         ret = -EINVAL;
0297         break;
0298     }
0299 
0300     if (ret)
0301         return ret;
0302 
0303     dev_dbg(&csdev->dev, "TMC-ETB/ETF enabled\n");
0304     return 0;
0305 }
0306 
/*
 * Disable the ETB/ETF sink.  The hardware is torn down only when the
 * last user drops its reference; until then -EBUSY is returned.  Also
 * returns -EBUSY while a buffer read is in progress.
 */
static int tmc_disable_etf_sink(struct coresight_device *csdev)
{
    unsigned long flags;
    struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

    spin_lock_irqsave(&drvdata->spinlock, flags);

    if (drvdata->reading) {
        spin_unlock_irqrestore(&drvdata->spinlock, flags);
        return -EBUSY;
    }

    /* Non-zero after decrement: other users still hold the sink */
    if (atomic_dec_return(csdev->refcnt)) {
        spin_unlock_irqrestore(&drvdata->spinlock, flags);
        return -EBUSY;
    }

    /* Complain if we (somehow) got out of sync */
    WARN_ON_ONCE(drvdata->mode == CS_MODE_DISABLED);
    tmc_etb_disable_hw(drvdata);
    /* Dissociate from monitored process. */
    drvdata->pid = -1;
    drvdata->mode = CS_MODE_DISABLED;

    spin_unlock_irqrestore(&drvdata->spinlock, flags);

    dev_dbg(&csdev->dev, "TMC-ETB/ETF disabled\n");
    return 0;
}
0336 
/*
 * Enable the TMC as a link (hardware FIFO) on the trace path.  The
 * hardware is programmed only on the 0 -> 1 refcount transition; later
 * enables just take another reference.  Returns -EBUSY while a buffer
 * read is in progress, otherwise the result of tmc_etf_enable_hw().
 */
static int tmc_enable_etf_link(struct coresight_device *csdev,
                   int inport, int outport)
{
    int ret = 0;
    unsigned long flags;
    struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
    bool first_enable = false;

    spin_lock_irqsave(&drvdata->spinlock, flags);
    if (drvdata->reading) {
        spin_unlock_irqrestore(&drvdata->spinlock, flags);
        return -EBUSY;
    }

    if (atomic_read(&csdev->refcnt[0]) == 0) {
        ret = tmc_etf_enable_hw(drvdata);
        if (!ret) {
            drvdata->mode = CS_MODE_SYSFS;
            first_enable = true;
        }
    }
    if (!ret)
        atomic_inc(&csdev->refcnt[0]);
    spin_unlock_irqrestore(&drvdata->spinlock, flags);

    /* Log outside the spinlock, and only on the first enable */
    if (first_enable)
        dev_dbg(&csdev->dev, "TMC-ETF enabled\n");
    return ret;
}
0366 
/*
 * Disable the TMC link.  The hardware is stopped only when the last
 * reference is dropped (1 -> 0 transition).  A disable attempt while a
 * buffer read is in progress is silently ignored.
 */
static void tmc_disable_etf_link(struct coresight_device *csdev,
                 int inport, int outport)
{
    unsigned long flags;
    struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
    bool last_disable = false;

    spin_lock_irqsave(&drvdata->spinlock, flags);
    if (drvdata->reading) {
        spin_unlock_irqrestore(&drvdata->spinlock, flags);
        return;
    }

    if (atomic_dec_return(&csdev->refcnt[0]) == 0) {
        tmc_etf_disable_hw(drvdata);
        drvdata->mode = CS_MODE_DISABLED;
        last_disable = true;
    }
    spin_unlock_irqrestore(&drvdata->spinlock, flags);

    /* Log outside the spinlock, and only on the last disable */
    if (last_disable)
        dev_dbg(&csdev->dev, "TMC-ETF disabled\n");
}
0390 
0391 static void *tmc_alloc_etf_buffer(struct coresight_device *csdev,
0392                   struct perf_event *event, void **pages,
0393                   int nr_pages, bool overwrite)
0394 {
0395     int node;
0396     struct cs_buffers *buf;
0397 
0398     node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);
0399 
0400     /* Allocate memory structure for interaction with Perf */
0401     buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
0402     if (!buf)
0403         return NULL;
0404 
0405     buf->pid = task_pid_nr(event->owner);
0406     buf->snapshot = overwrite;
0407     buf->nr_pages = nr_pages;
0408     buf->data_pages = pages;
0409 
0410     return buf;
0411 }
0412 
/* Release the cs_buffers descriptor; kfree(NULL) is a safe no-op. */
static void tmc_free_etf_buffer(void *config)
{
    kfree(config);
}
0419 
0420 static int tmc_set_etf_buffer(struct coresight_device *csdev,
0421                   struct perf_output_handle *handle)
0422 {
0423     int ret = 0;
0424     unsigned long head;
0425     struct cs_buffers *buf = etm_perf_sink_config(handle);
0426 
0427     if (!buf)
0428         return -EINVAL;
0429 
0430     /* wrap head around to the amount of space we have */
0431     head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);
0432 
0433     /* find the page to write to */
0434     buf->cur = head / PAGE_SIZE;
0435 
0436     /* and offset within that page */
0437     buf->offset = head % PAGE_SIZE;
0438 
0439     local_set(&buf->data_size, 0);
0440 
0441     return ret;
0442 }
0443 
/*
 * Copy the trace captured in the TMC's internal RAM into the perf AUX
 * ring buffer.  The TMC is flushed and stopped, the amount of available
 * data computed from RRP/RWP (all of drvdata->size if the RAM wrapped),
 * trimmed to the handle's space in non-snapshot mode by advancing RRP,
 * and then drained word by word through TMC_RRD into the AUX pages.
 * Returns the number of bytes made available to perf.
 */
static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
                  struct perf_output_handle *handle,
                  void *sink_config)
{
    bool lost = false;
    int i, cur;
    const u32 *barrier;
    u32 *buf_ptr;
    u64 read_ptr, write_ptr;
    u32 status;
    unsigned long offset, to_read = 0, flags;
    struct cs_buffers *buf = sink_config;
    struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

    if (!buf)
        return 0;

    /* This shouldn't happen */
    if (WARN_ON_ONCE(drvdata->mode != CS_MODE_PERF))
        return 0;

    spin_lock_irqsave(&drvdata->spinlock, flags);

    /* Don't do anything if another tracer is using this sink */
    if (atomic_read(csdev->refcnt) != 1)
        goto out;

    CS_UNLOCK(drvdata->base);

    tmc_flush_and_stop(drvdata);

    read_ptr = tmc_read_rrp(drvdata);
    write_ptr = tmc_read_rwp(drvdata);

    /*
     * Get a hold of the status register and see if a wrap around
     * has occurred.  If so adjust things accordingly.
     */
    status = readl_relaxed(drvdata->base + TMC_STS);
    if (status & TMC_STS_FULL) {
        lost = true;
        to_read = drvdata->size;
    } else {
        to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->size);
    }

    /*
     * The TMC RAM buffer may be bigger than the space available in the
     * perf ring buffer (handle->size).  If so advance the RRP so that we
     * get the latest trace data.  In snapshot mode none of that matters
     * since we are expected to clobber stale data in favour of the latest
     * traces.
     */
    if (!buf->snapshot && to_read > handle->size) {
        u32 mask = tmc_get_memwidth_mask(drvdata);

        /*
         * Make sure the new size is aligned in accordance with the
         * requirement explained in function tmc_get_memwidth_mask().
         */
        to_read = handle->size & mask;
        /* Move the RAM read pointer up */
        read_ptr = (write_ptr + drvdata->size) - to_read;
        /* Make sure we are still within our limits */
        if (read_ptr > (drvdata->size - 1))
            read_ptr -= drvdata->size;
        /* Tell the HW */
        tmc_write_rrp(drvdata, read_ptr);
        lost = true;
    }

    /*
     * Don't set the TRUNCATED flag in snapshot mode because 1) the
     * captured buffer is expected to be truncated and 2) a full buffer
     * prevents the event from being re-enabled by the perf core,
     * resulting in stale data being send to user space.
     */
    if (!buf->snapshot && lost)
        perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);

    cur = buf->cur;
    offset = buf->offset;
    barrier = coresight_barrier_pkt;

    /* for every byte to read */
    for (i = 0; i < to_read; i += 4) {
        buf_ptr = buf->data_pages[cur] + offset;
        *buf_ptr = readl_relaxed(drvdata->base + TMC_RRD);

        /*
         * When data was lost, overwrite the leading words with the
         * barrier packet so consumers see the discontinuity.
         */
        if (lost && i < CORESIGHT_BARRIER_PKT_SIZE) {
            *buf_ptr = *barrier;
            barrier++;
        }

        offset += 4;
        if (offset >= PAGE_SIZE) {
            offset = 0;
            cur++;
            /* wrap around at the end of the buffer */
            cur &= buf->nr_pages - 1;
        }
    }

    /*
     * In snapshot mode we simply increment the head by the number of byte
     * that were written.  User space will figure out how many bytes to get
     * from the AUX buffer based on the position of the head.
     */
    if (buf->snapshot)
        handle->head += to_read;

    /*
     * CS_LOCK() contains mb() so it can ensure visibility of the AUX trace
     * data before the aux_head is updated via perf_aux_output_end(), which
     * is expected by the perf ring buffer.
     */
    CS_LOCK(drvdata->base);
out:
    spin_unlock_irqrestore(&drvdata->spinlock, flags);

    return to_read;
}
0566 
/* Sink operations, shared by the ETB and ETF configurations */
static const struct coresight_ops_sink tmc_etf_sink_ops = {
    .enable     = tmc_enable_etf_sink,
    .disable    = tmc_disable_etf_sink,
    .alloc_buffer   = tmc_alloc_etf_buffer,
    .free_buffer    = tmc_free_etf_buffer,
    .update_buffer  = tmc_update_etf_buffer,
};

/* Link operations, only meaningful for the ETF configuration */
static const struct coresight_ops_link tmc_etf_link_ops = {
    .enable     = tmc_enable_etf_link,
    .disable    = tmc_disable_etf_link,
};

/* An ETB can only act as a sink */
const struct coresight_ops tmc_etb_cs_ops = {
    .sink_ops   = &tmc_etf_sink_ops,
};

/* An ETF can act either as a sink or as a link on the trace path */
const struct coresight_ops tmc_etf_cs_ops = {
    .sink_ops   = &tmc_etf_sink_ops,
    .link_ops   = &tmc_etf_link_ops,
};
0588 
/*
 * Prepare the ETB/ETF for a buffer read through the character device:
 * flag the device as being read and, when tracing from sysFS, stop the
 * hardware after draining its RAM (see __tmc_etb_disable_hw()).
 * Returns -EBUSY if already being read, -EINVAL when operated from perf,
 * when there is no data (drvdata->buf is NULL), when the TMC is in HW
 * FIFO mode, or on an unexpected configuration type.
 */
int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
{
    enum tmc_mode mode;
    int ret = 0;
    unsigned long flags;

    /* config types are set a boot time and never change */
    if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
             drvdata->config_type != TMC_CONFIG_TYPE_ETF))
        return -EINVAL;

    spin_lock_irqsave(&drvdata->spinlock, flags);

    if (drvdata->reading) {
        ret = -EBUSY;
        goto out;
    }

    /* Don't interfere if operated from Perf */
    if (drvdata->mode == CS_MODE_PERF) {
        ret = -EINVAL;
        goto out;
    }

    /* If drvdata::buf is NULL the trace data has been read already */
    if (drvdata->buf == NULL) {
        ret = -EINVAL;
        goto out;
    }

    /* Disable the TMC if need be */
    if (drvdata->mode == CS_MODE_SYSFS) {
        /* There is no point in reading a TMC in HW FIFO mode */
        mode = readl_relaxed(drvdata->base + TMC_MODE);
        if (mode != TMC_MODE_CIRCULAR_BUFFER) {
            ret = -EINVAL;
            goto out;
        }
        __tmc_etb_disable_hw(drvdata);
    }

    drvdata->reading = true;
out:
    spin_unlock_irqrestore(&drvdata->spinlock, flags);

    return ret;
}
0636 
/*
 * Finish a buffer read through the character device.  If a sysFS trace
 * run is still active the hardware is re-started on a zeroed buffer;
 * otherwise the trace buffer is released (freed outside the spinlock).
 * Returns -EINVAL if the TMC is in HW FIFO mode or on an unexpected
 * configuration type, 0 otherwise.
 */
int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
{
    char *buf = NULL;
    enum tmc_mode mode;
    unsigned long flags;

    /* config types are set a boot time and never change */
    if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
             drvdata->config_type != TMC_CONFIG_TYPE_ETF))
        return -EINVAL;

    spin_lock_irqsave(&drvdata->spinlock, flags);

    /* Re-enable the TMC if need be */
    if (drvdata->mode == CS_MODE_SYSFS) {
        /* There is no point in reading a TMC in HW FIFO mode */
        mode = readl_relaxed(drvdata->base + TMC_MODE);
        if (mode != TMC_MODE_CIRCULAR_BUFFER) {
            spin_unlock_irqrestore(&drvdata->spinlock, flags);
            return -EINVAL;
        }
        /*
         * The trace run will continue with the same allocated trace
         * buffer. As such zero-out the buffer so that we don't end
         * up with stale data.
         *
         * Since the tracer is still enabled drvdata::buf
         * can't be NULL.
         */
        memset(drvdata->buf, 0, drvdata->size);
        __tmc_etb_enable_hw(drvdata);
    } else {
        /*
         * The ETB/ETF is not tracing and the buffer was just read.
         * As such prepare to free the trace buffer.
         */
        buf = drvdata->buf;
        drvdata->buf = NULL;
    }

    drvdata->reading = false;
    spin_unlock_irqrestore(&drvdata->spinlock, flags);

    /*
     * Free allocated memory outside of the spinlock.  There is no need
     * to assert the validity of 'buf' since calling kfree(NULL) is safe.
     */
    kfree(buf);

    return 0;
}