// SPDX-License-Identifier: GPL-2.0
/*
 * Intel(R) Trace Hub Memory Storage Unit
 *
 * Copyright (C) 2014-2015 Intel Corporation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/uaccess.h>
#include <linux/sizes.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/workqueue.h>
#include <linux/dma-mapping.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <linux/intel_th.h>
#include "intel_th.h"
#include "msu.h"

#define msc_dev(x) (&(x)->thdev->dev)

/*
 * Lockout state transitions:
 *   READY -> INUSE -+-> LOCKED -+-> READY -> etc.
 *                   \-----------/
 * WIN_READY:   window can be used by HW
 * WIN_INUSE:   window is in use
 * WIN_LOCKED:  window is filled up and is being processed by the buffer
 * handling code
 *
 * All state transitions happen automatically, except for the LOCKED->READY,
 * which needs to be signalled by the buffer code by calling
 * intel_th_msc_window_unlock().
 *
 * When the interrupt handler has to switch to the next window, it checks
 * whether it's READY, and if it is, it performs the switch and tracing
 * continues. If it's LOCKED, it stops the trace.
 */
enum lockout_state {
    WIN_READY = 0,
    WIN_INUSE,
    WIN_LOCKED
};

/**
 * struct msc_window - multiblock mode window descriptor
 * @entry:  window list linkage (msc::win_list)
 * @pgoff:  page offset into the buffer that this window starts at
 * @lockout:    lockout state, see comment above
 * @lo_lock:    lockout state serialization
 * @nr_blocks:  number of blocks (pages) in this window
 * @nr_segs:    number of segments in this window (<= @nr_blocks)
 * @msc:    MSC device that this window belongs to
 * @_sgt:   array of block descriptors
 * @sgt:    array of block descriptors
 */
struct msc_window {
    struct list_head    entry;
    unsigned long       pgoff;
    enum lockout_state  lockout;
    spinlock_t      lo_lock;
    unsigned int        nr_blocks;
    unsigned int        nr_segs;
    struct msc      *msc;
    struct sg_table     _sgt;
    struct sg_table     *sgt;
};

/**
 * struct msc_iter - iterator for msc buffer
 * @entry:      msc::iter_list linkage
 * @msc:        pointer to the MSC device
 * @start_win:      oldest window
 * @win:        current window
 * @offset:     current logical offset into the buffer
 * @start_block:    oldest block in the window
 * @block:      current block in the window
 * @block_off:      offset into current block
 * @wrap_count:     block wrapping handling
 * @eof:        end of buffer reached
 */
struct msc_iter {
    struct list_head    entry;
    struct msc      *msc;
    struct msc_window   *start_win;
    struct msc_window   *win;
    unsigned long       offset;
    struct scatterlist  *start_block;
    struct scatterlist  *block;
    unsigned int        block_off;
    unsigned int        wrap_count;
    unsigned int        eof;
};

/**
 * struct msc - MSC device representation
 * @reg_base:       register window base address
 * @msu_base:       MSU register window base address
 * @thdev:      intel_th_device pointer
 * @mbuf:       MSU buffer, if assigned
 * @mbuf_priv:      MSU buffer's private data, if @mbuf
 * @work:       a work for deactivating the trace when the buffer fills up
 * @win_list:       list of windows in multiblock mode
 * @single_sgt:     single mode buffer
 * @cur_win:        current window
 * @switch_on_unlock:   window to switch to once it is unlocked
 * @nr_pages:       total number of pages allocated for this buffer
 * @single_sz:      amount of data in single mode
 * @single_wrap:    single mode wrap occurred
 * @base:       buffer's base pointer
 * @base_addr:      buffer's base address
 * @orig_addr:      saved MSC0BAR value, restored on disable
 * @orig_sz:        saved MSC0SIZE value, restored on disable
 * @user_count:     number of users of the buffer
 * @mmap_count:     number of mappings
 * @buf_mutex:      mutex to serialize access to buffer-related bits
 * @iter_list:      list of open character device readers
 * @stop_on_full:   stop the trace if the next window is not ready,
 *          rather than waiting for it to be unlocked
 * @enabled:        MSC is enabled
 * @wrap:       wrapping is enabled
 * @do_irq:     IRQ resource is available, handle interrupts
 * @multi_is_broken:    multiblock mode is not supported on this device
 * @mode:       MSC operating mode
 * @burst_len:      write burst length
 * @index:      number of this MSC in the MSU
 */
struct msc {
    void __iomem        *reg_base;
    void __iomem        *msu_base;
    struct intel_th_device  *thdev;

    const struct msu_buffer *mbuf;
    void            *mbuf_priv;

    struct work_struct  work;
    struct list_head    win_list;
    struct sg_table     single_sgt;
    struct msc_window   *cur_win;
    struct msc_window   *switch_on_unlock;
    unsigned long       nr_pages;
    unsigned long       single_sz;
    unsigned int        single_wrap : 1;
    void            *base;
    dma_addr_t      base_addr;
    u32         orig_addr;
    u32         orig_sz;

    /* <0: no buffer, 0: no users, >0: active users */
    atomic_t        user_count;

    atomic_t        mmap_count;
    struct mutex        buf_mutex;

    struct list_head    iter_list;

    bool            stop_on_full;

    /* config */
    unsigned int        enabled : 1,
                wrap    : 1,
                do_irq  : 1,
                multi_is_broken : 1;
    unsigned int        mode;
    unsigned int        burst_len;
    unsigned int        index;
};

static LIST_HEAD(msu_buffer_list);
static DEFINE_MUTEX(msu_buffer_mutex);

/**
 * struct msu_buffer_entry - internal MSU buffer bookkeeping
 * @entry:  link to msu_buffer_list
 * @mbuf:   MSU buffer object
 * @owner:  module that provides this MSU buffer
 */
struct msu_buffer_entry {
    struct list_head    entry;
    const struct msu_buffer *mbuf;
    struct module       *owner;
};

static struct msu_buffer_entry *__msu_buffer_entry_find(const char *name)
{
    struct msu_buffer_entry *mbe;

    lockdep_assert_held(&msu_buffer_mutex);

    list_for_each_entry(mbe, &msu_buffer_list, entry) {
        if (!strcmp(mbe->mbuf->name, name))
            return mbe;
    }

    return NULL;
}

static const struct msu_buffer *
msu_buffer_get(const char *name)
{
    struct msu_buffer_entry *mbe;

    mutex_lock(&msu_buffer_mutex);
    mbe = __msu_buffer_entry_find(name);
    if (mbe && !try_module_get(mbe->owner))
        mbe = NULL;
    mutex_unlock(&msu_buffer_mutex);

    return mbe ? mbe->mbuf : NULL;
}

static void msu_buffer_put(const struct msu_buffer *mbuf)
{
    struct msu_buffer_entry *mbe;

    mutex_lock(&msu_buffer_mutex);
    mbe = __msu_buffer_entry_find(mbuf->name);
    if (mbe)
        module_put(mbe->owner);
    mutex_unlock(&msu_buffer_mutex);
}

int intel_th_msu_buffer_register(const struct msu_buffer *mbuf,
                 struct module *owner)
{
    struct msu_buffer_entry *mbe;
    int ret = 0;

    mbe = kzalloc(sizeof(*mbe), GFP_KERNEL);
    if (!mbe)
        return -ENOMEM;

    mutex_lock(&msu_buffer_mutex);
    if (__msu_buffer_entry_find(mbuf->name)) {
        ret = -EEXIST;
        kfree(mbe);
        goto unlock;
    }

    mbe->mbuf = mbuf;
    mbe->owner = owner;
    list_add_tail(&mbe->entry, &msu_buffer_list);
unlock:
    mutex_unlock(&msu_buffer_mutex);

    return ret;
}
EXPORT_SYMBOL_GPL(intel_th_msu_buffer_register);

void intel_th_msu_buffer_unregister(const struct msu_buffer *mbuf)
{
    struct msu_buffer_entry *mbe;

    mutex_lock(&msu_buffer_mutex);
    mbe = __msu_buffer_entry_find(mbuf->name);
    if (mbe) {
        list_del(&mbe->entry);
        kfree(mbe);
    }
    mutex_unlock(&msu_buffer_mutex);
}
EXPORT_SYMBOL_GPL(intel_th_msu_buffer_unregister);
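
/*
 * Example: a minimal MSU buffer sink registering itself with the two
 * exported calls above. This is only a sketch: the callback set mirrors
 * how this file invokes struct msu_buffer (->assign, ->unassign,
 * ->alloc_window, ->free_window, ->ready), and "example_sink" and its
 * callbacks are hypothetical names:
 *
 *	static const struct msu_buffer example_sink = {
 *		.name		= "example",
 *		.assign		= example_assign,
 *		.unassign	= example_unassign,
 *		.alloc_window	= example_alloc_window,
 *		.free_window	= example_free_window,
 *		.ready		= example_ready,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return intel_th_msu_buffer_register(&example_sink, THIS_MODULE);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		intel_th_msu_buffer_unregister(&example_sink);
 *	}
 *
 * Once registered, writing the sink's name to the mode attribute selects
 * it (see mode_store() below).
 */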

static inline bool msc_block_is_empty(struct msc_block_desc *bdesc)
{
    /* header hasn't been written */
    if (!bdesc->valid_dw)
        return true;

    /* valid_dw includes the header */
    if (!msc_data_sz(bdesc))
        return true;

    return false;
}

static inline struct scatterlist *msc_win_base_sg(struct msc_window *win)
{
    return win->sgt->sgl;
}

static inline struct msc_block_desc *msc_win_base(struct msc_window *win)
{
    return sg_virt(msc_win_base_sg(win));
}

static inline dma_addr_t msc_win_base_dma(struct msc_window *win)
{
    return sg_dma_address(msc_win_base_sg(win));
}

static inline unsigned long
msc_win_base_pfn(struct msc_window *win)
{
    return PFN_DOWN(msc_win_base_dma(win));
}

/**
 * msc_is_last_win() - check if a window is the last one for a given MSC
 * @win:    window
 * Return:  true if @win is the last window in MSC's multiblock buffer
 */
static inline bool msc_is_last_win(struct msc_window *win)
{
    return win->entry.next == &win->msc->win_list;
}

/**
 * msc_next_window() - return next window in the multiblock buffer
 * @win:    current window
 *
 * Return:  window following the current one
 */
static struct msc_window *msc_next_window(struct msc_window *win)
{
    if (msc_is_last_win(win))
        return list_first_entry(&win->msc->win_list, struct msc_window,
                    entry);

    return list_next_entry(win, entry);
}

static size_t msc_win_total_sz(struct msc_window *win)
{
    struct scatterlist *sg;
    unsigned int blk;
    size_t size = 0;

    for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
        struct msc_block_desc *bdesc = sg_virt(sg);

        if (msc_block_wrapped(bdesc))
            return (size_t)win->nr_blocks << PAGE_SHIFT;

        size += msc_total_sz(bdesc);
        if (msc_block_last_written(bdesc))
            break;
    }

    return size;
}

/**
 * msc_find_window() - find a window matching a given sg_table
 * @msc:    MSC device
 * @sgt:    SG table of the window
 * @nonempty:   skip over empty windows
 *
 * Return:  MSC window structure pointer or NULL if the window
 *      could not be found.
 */
static struct msc_window *
msc_find_window(struct msc *msc, struct sg_table *sgt, bool nonempty)
{
    struct msc_window *win;
    unsigned int found = 0;

    if (list_empty(&msc->win_list))
        return NULL;

    /*
     * we might need a radix tree for this, depending on how
     * many windows a typical user would allocate; ideally it's
     * something like 2, in which case we're good
     */
    list_for_each_entry(win, &msc->win_list, entry) {
        if (win->sgt == sgt)
            found++;

        /* skip the empty ones */
        if (nonempty && msc_block_is_empty(msc_win_base(win)))
            continue;

        if (found)
            return win;
    }

    return NULL;
}

/**
 * msc_oldest_window() - locate the window with oldest data
 * @msc:    MSC device
 *
 * This should only be used in multiblock mode. Caller should hold the
 * msc::user_count reference.
 *
 * Return:  the oldest window with valid data
 */
static struct msc_window *msc_oldest_window(struct msc *msc)
{
    struct msc_window *win;

    if (list_empty(&msc->win_list))
        return NULL;

    win = msc_find_window(msc, msc_next_window(msc->cur_win)->sgt, true);
    if (win)
        return win;

    return list_first_entry(&msc->win_list, struct msc_window, entry);
}

/**
 * msc_win_oldest_sg() - locate the oldest block in a given window
 * @win:    window to look at
 *
 * Return:  SG element of the block with the oldest data
 */
static struct scatterlist *msc_win_oldest_sg(struct msc_window *win)
{
    unsigned int blk;
    struct scatterlist *sg;
    struct msc_block_desc *bdesc = msc_win_base(win);

    /* without wrapping, first block is the oldest */
    if (!msc_block_wrapped(bdesc))
        return msc_win_base_sg(win);

    /*
     * with wrapping, last written block contains both the newest and the
     * oldest data for this window.
     */
    for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
        struct msc_block_desc *bdesc = sg_virt(sg);

        if (msc_block_last_written(bdesc))
            return sg;
    }

    return msc_win_base_sg(win);
}

static struct msc_block_desc *msc_iter_bdesc(struct msc_iter *iter)
{
    return sg_virt(iter->block);
}

static struct msc_iter *msc_iter_install(struct msc *msc)
{
    struct msc_iter *iter;

    iter = kzalloc(sizeof(*iter), GFP_KERNEL);
    if (!iter)
        return ERR_PTR(-ENOMEM);

    mutex_lock(&msc->buf_mutex);

    /*
     * Reading and tracing are mutually exclusive; if msc is
     * enabled, open() will fail; otherwise existing readers
     * will prevent enabling the msc and the rest of fops don't
     * need to worry about it.
     */
    if (msc->enabled) {
        kfree(iter);
        iter = ERR_PTR(-EBUSY);
        goto unlock;
    }

    iter->msc = msc;

    list_add_tail(&iter->entry, &msc->iter_list);
unlock:
    mutex_unlock(&msc->buf_mutex);

    return iter;
}

static void msc_iter_remove(struct msc_iter *iter, struct msc *msc)
{
    mutex_lock(&msc->buf_mutex);
    list_del(&iter->entry);
    mutex_unlock(&msc->buf_mutex);

    kfree(iter);
}

static void msc_iter_block_start(struct msc_iter *iter)
{
    if (iter->start_block)
        return;

    iter->start_block = msc_win_oldest_sg(iter->win);
    iter->block = iter->start_block;
    iter->wrap_count = 0;

    /*
     * start with the block with oldest data; if data has wrapped
     * in this window, it should be in this block
     */
    if (msc_block_wrapped(msc_iter_bdesc(iter)))
        iter->wrap_count = 2;
}

static int msc_iter_win_start(struct msc_iter *iter, struct msc *msc)
{
    /* already started, nothing to do */
    if (iter->start_win)
        return 0;

    iter->start_win = msc_oldest_window(msc);
    if (!iter->start_win)
        return -EINVAL;

    iter->win = iter->start_win;
    iter->start_block = NULL;

    msc_iter_block_start(iter);

    return 0;
}

static int msc_iter_win_advance(struct msc_iter *iter)
{
    iter->win = msc_next_window(iter->win);
    iter->start_block = NULL;

    if (iter->win == iter->start_win) {
        iter->eof++;
        return 1;
    }

    msc_iter_block_start(iter);

    return 0;
}

static int msc_iter_block_advance(struct msc_iter *iter)
{
    iter->block_off = 0;

    /* wrapping */
    if (iter->wrap_count && iter->block == iter->start_block) {
        iter->wrap_count--;
        if (!iter->wrap_count)
            /* copied newest data from the wrapped block */
            return msc_iter_win_advance(iter);
    }

    /* no wrapping, check for last written block */
    if (!iter->wrap_count && msc_block_last_written(msc_iter_bdesc(iter)))
        /* copied newest data for the window */
        return msc_iter_win_advance(iter);

    /* block advance */
    if (sg_is_last(iter->block))
        iter->block = msc_win_base_sg(iter->win);
    else
        iter->block = sg_next(iter->block);

    /* no wrapping, sanity check in case there is no last written block */
    if (!iter->wrap_count && iter->block == iter->start_block)
        return msc_iter_win_advance(iter);

    return 0;
}

/**
 * msc_buffer_iterate() - go through multiblock buffer's data
 * @iter:   iterator structure
 * @size:   amount of data to scan
 * @data:   callback's private data
 * @fn:     iterator callback
 *
 * This will start at the window which will be written to next (containing
 * the oldest data) and work its way to the current window, calling @fn
 * for each chunk of data as it goes.
 *
 * Caller should have msc::user_count reference to make sure the buffer
 * doesn't disappear from under us.
 *
 * Return:  amount of data actually scanned.
 */
static ssize_t
msc_buffer_iterate(struct msc_iter *iter, size_t size, void *data,
           unsigned long (*fn)(void *, void *, size_t))
{
    struct msc *msc = iter->msc;
    size_t len = size;
    unsigned int advance;

    if (iter->eof)
        return 0;

    /* start with the oldest window */
    if (msc_iter_win_start(iter, msc))
        return 0;

    do {
        unsigned long data_bytes = msc_data_sz(msc_iter_bdesc(iter));
        void *src = (void *)msc_iter_bdesc(iter) + MSC_BDESC;
        size_t tocopy = data_bytes, copied = 0;
        size_t remaining = 0;

        advance = 1;

        /*
         * If block wrapping happened, we need to visit the last block
         * twice, because it contains both the oldest and the newest
         * data in this window.
         *
         * First time (wrap_count==2), in the very beginning, to collect
         * the oldest data, which is in the range
         * (data_bytes..DATA_IN_PAGE).
         *
         * Second time (wrap_count==1), it's just like any other block,
         * containing data in the range of [MSC_BDESC..data_bytes].
         */
        if (iter->block == iter->start_block && iter->wrap_count == 2) {
            tocopy = DATA_IN_PAGE - data_bytes;
            src += data_bytes;
        }

        if (!tocopy)
            goto next_block;

        tocopy -= iter->block_off;
        src += iter->block_off;

        if (len < tocopy) {
            tocopy = len;
            advance = 0;
        }

        remaining = fn(data, src, tocopy);

        if (remaining)
            advance = 0;

        copied = tocopy - remaining;
        len -= copied;
        iter->block_off += copied;
        iter->offset += copied;

        if (!advance)
            break;

next_block:
        if (msc_iter_block_advance(iter))
            break;

    } while (len);

    return size - len;
}
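
/*
 * Example: a counting callback for msc_buffer_iterate(). Like
 * msc_win_to_user() below, @fn must return the number of bytes it did
 * NOT consume (0 means "keep going"). A sketch only; "sz_count" and the
 * accumulator are hypothetical:
 *
 *	static unsigned long sz_count(void *data, void *src, size_t len)
 *	{
 *		*(size_t *)data += len;
 *
 *		return 0;	// consumed everything, don't stop early
 *	}
 *
 *	size_t total = 0;
 *	msc_buffer_iterate(iter, SIZE_MAX, &total, sz_count);
 */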

/**
 * msc_buffer_clear_hw_header() - clear hw header for multiblock
 * @msc:    MSC device
 */
static void msc_buffer_clear_hw_header(struct msc *msc)
{
    struct msc_window *win;
    struct scatterlist *sg;

    list_for_each_entry(win, &msc->win_list, entry) {
        unsigned int blk;

        for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
            struct msc_block_desc *bdesc = sg_virt(sg);

            memset_startat(bdesc, 0, hw_tag);
        }
    }
}

static int intel_th_msu_init(struct msc *msc)
{
    u32 mintctl, msusts;

    if (!msc->do_irq)
        return 0;

    if (!msc->mbuf)
        return 0;

    mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL);
    mintctl |= msc->index ? M1BLIE : M0BLIE;
    iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL);
    if (mintctl != ioread32(msc->msu_base + REG_MSU_MINTCTL)) {
        dev_info(msc_dev(msc), "MINTCTL ignores writes: no usable interrupts\n");
        msc->do_irq = 0;
        return 0;
    }

    msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS);
    iowrite32(msusts, msc->msu_base + REG_MSU_MSUSTS);

    return 0;
}

static void intel_th_msu_deinit(struct msc *msc)
{
    u32 mintctl;

    if (!msc->do_irq)
        return;

    mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL);
    mintctl &= msc->index ? ~M1BLIE : ~M0BLIE;
    iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL);
}

static int msc_win_set_lockout(struct msc_window *win,
                   enum lockout_state expect,
                   enum lockout_state new)
{
    enum lockout_state old;
    unsigned long flags;
    int ret = 0;

    if (!win->msc->mbuf)
        return 0;

    spin_lock_irqsave(&win->lo_lock, flags);
    old = win->lockout;

    if (old != expect) {
        ret = -EINVAL;
        goto unlock;
    }

    win->lockout = new;

    if (old == expect && new == WIN_LOCKED)
        atomic_inc(&win->msc->user_count);
    else if (old == expect && old == WIN_LOCKED)
        atomic_dec(&win->msc->user_count);

unlock:
    spin_unlock_irqrestore(&win->lo_lock, flags);

    if (ret) {
        if (expect == WIN_READY && old == WIN_LOCKED)
            return -EBUSY;

        /* from intel_th_msc_window_unlock(), don't warn if not locked */
        if (expect == WIN_LOCKED && old == new)
            return 0;

        dev_warn_ratelimited(msc_dev(win->msc),
                     "expected lockout state %d, got %d\n",
                     expect, old);
    }

    return ret;
}
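
/*
 * The expected lockout lifecycle, summarized from the callers in this
 * file (this is a summary of existing behavior, not additional API):
 *
 *	msc_configure():		READY  -> INUSE   (current window)
 *	intel_th_msc_interrupt():	READY  -> INUSE   (next window)
 *					INUSE  -> LOCKED  (current window)
 *	msc_disable():			INUSE  -> LOCKED  (current window)
 *	intel_th_msc_window_unlock():	LOCKED -> READY   (buffer owner)
 */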

/**
 * msc_configure() - set up MSC hardware
 * @msc:    the MSC device to configure
 *
 * Program storage mode, wrapping, burst length and trace buffer address
 * into a given MSC. Then, enable tracing and set msc::enabled.
 * The latter is serialized on msc::buf_mutex, so make sure to hold it.
 */
static int msc_configure(struct msc *msc)
{
    u32 reg;

    lockdep_assert_held(&msc->buf_mutex);

    if (msc->mode > MSC_MODE_MULTI)
        return -EINVAL;

    if (msc->mode == MSC_MODE_MULTI) {
        if (msc_win_set_lockout(msc->cur_win, WIN_READY, WIN_INUSE))
            return -EBUSY;

        msc_buffer_clear_hw_header(msc);
    }

    msc->orig_addr = ioread32(msc->reg_base + REG_MSU_MSC0BAR);
    msc->orig_sz   = ioread32(msc->reg_base + REG_MSU_MSC0SIZE);

    reg = msc->base_addr >> PAGE_SHIFT;
    iowrite32(reg, msc->reg_base + REG_MSU_MSC0BAR);

    if (msc->mode == MSC_MODE_SINGLE) {
        reg = msc->nr_pages;
        iowrite32(reg, msc->reg_base + REG_MSU_MSC0SIZE);
    }

    reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL);
    reg &= ~(MSC_MODE | MSC_WRAPEN | MSC_EN | MSC_RD_HDR_OVRD);

    reg |= MSC_EN;
    reg |= msc->mode << __ffs(MSC_MODE);
    reg |= msc->burst_len << __ffs(MSC_LEN);

    if (msc->wrap)
        reg |= MSC_WRAPEN;

    iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL);

    intel_th_msu_init(msc);

    msc->thdev->output.multiblock = msc->mode == MSC_MODE_MULTI;
    intel_th_trace_enable(msc->thdev);
    msc->enabled = 1;

    if (msc->mbuf && msc->mbuf->activate)
        msc->mbuf->activate(msc->mbuf_priv);

    return 0;
}

/**
 * msc_disable() - disable MSC hardware
 * @msc:    MSC device to disable
 *
 * If @msc is enabled, disable tracing on the switch and then disable MSC
 * storage. Caller must hold msc::buf_mutex.
 */
static void msc_disable(struct msc *msc)
{
    struct msc_window *win = msc->cur_win;
    u32 reg;

    lockdep_assert_held(&msc->buf_mutex);

    if (msc->mode == MSC_MODE_MULTI)
        msc_win_set_lockout(win, WIN_INUSE, WIN_LOCKED);

    if (msc->mbuf && msc->mbuf->deactivate)
        msc->mbuf->deactivate(msc->mbuf_priv);
    intel_th_msu_deinit(msc);
    intel_th_trace_disable(msc->thdev);

    if (msc->mode == MSC_MODE_SINGLE) {
        reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
        msc->single_wrap = !!(reg & MSCSTS_WRAPSTAT);

        reg = ioread32(msc->reg_base + REG_MSU_MSC0MWP);
        msc->single_sz = reg & ((msc->nr_pages << PAGE_SHIFT) - 1);
        dev_dbg(msc_dev(msc), "MSCnMWP: %08x/%08lx, wrap: %d\n",
            reg, msc->single_sz, msc->single_wrap);
    }

    reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL);
    reg &= ~MSC_EN;
    iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL);

    if (msc->mbuf && msc->mbuf->ready)
        msc->mbuf->ready(msc->mbuf_priv, win->sgt,
                 msc_win_total_sz(win));

    msc->enabled = 0;

    iowrite32(msc->orig_addr, msc->reg_base + REG_MSU_MSC0BAR);
    iowrite32(msc->orig_sz, msc->reg_base + REG_MSU_MSC0SIZE);

    dev_dbg(msc_dev(msc), "MSCnNWSA: %08x\n",
        ioread32(msc->reg_base + REG_MSU_MSC0NWSA));

    reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
    dev_dbg(msc_dev(msc), "MSCnSTS: %08x\n", reg);

    reg = ioread32(msc->reg_base + REG_MSU_MSUSTS);
    reg &= msc->index ? MSUSTS_MSC1BLAST : MSUSTS_MSC0BLAST;
    iowrite32(reg, msc->reg_base + REG_MSU_MSUSTS);
}

static int intel_th_msc_activate(struct intel_th_device *thdev)
{
    struct msc *msc = dev_get_drvdata(&thdev->dev);
    int ret = -EBUSY;

    if (!atomic_inc_unless_negative(&msc->user_count))
        return -ENODEV;

    mutex_lock(&msc->buf_mutex);

    /* if there are readers, refuse */
    if (list_empty(&msc->iter_list))
        ret = msc_configure(msc);

    mutex_unlock(&msc->buf_mutex);

    if (ret)
        atomic_dec(&msc->user_count);

    return ret;
}

static void intel_th_msc_deactivate(struct intel_th_device *thdev)
{
    struct msc *msc = dev_get_drvdata(&thdev->dev);

    mutex_lock(&msc->buf_mutex);
    if (msc->enabled) {
        msc_disable(msc);
        atomic_dec(&msc->user_count);
    }
    mutex_unlock(&msc->buf_mutex);
}

/**
 * msc_buffer_contig_alloc() - allocate a contiguous buffer for SINGLE mode
 * @msc:    MSC device
 * @size:   allocation size in bytes
 *
 * This modifies msc::base, which requires msc::buf_mutex to serialize, so the
 * caller is expected to hold it.
 *
 * Return:  0 on success, -errno otherwise.
 */
static int msc_buffer_contig_alloc(struct msc *msc, unsigned long size)
{
    unsigned long nr_pages = size >> PAGE_SHIFT;
    unsigned int order = get_order(size);
    struct page *page;
    int ret;

    if (!size)
        return 0;

    ret = sg_alloc_table(&msc->single_sgt, 1, GFP_KERNEL);
    if (ret)
        goto err_out;

    ret = -ENOMEM;
    page = alloc_pages(GFP_KERNEL | __GFP_ZERO | GFP_DMA32, order);
    if (!page)
        goto err_free_sgt;

    split_page(page, order);
    sg_set_buf(msc->single_sgt.sgl, page_address(page), size);

    ret = dma_map_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl, 1,
             DMA_FROM_DEVICE);
    if (ret < 0)
        goto err_free_pages;

    msc->nr_pages = nr_pages;
    msc->base = page_address(page);
    msc->base_addr = sg_dma_address(msc->single_sgt.sgl);

    return 0;

err_free_pages:
    __free_pages(page, order);

err_free_sgt:
    sg_free_table(&msc->single_sgt);

err_out:
    return ret;
}

/**
 * msc_buffer_contig_free() - free a contiguous buffer
 * @msc:    MSC configured in SINGLE mode
 */
static void msc_buffer_contig_free(struct msc *msc)
{
    unsigned long off;

    dma_unmap_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl,
             1, DMA_FROM_DEVICE);
    sg_free_table(&msc->single_sgt);

    for (off = 0; off < msc->nr_pages << PAGE_SHIFT; off += PAGE_SIZE) {
        struct page *page = virt_to_page(msc->base + off);

        page->mapping = NULL;
        __free_page(page);
    }

    msc->nr_pages = 0;
}

/**
 * msc_buffer_contig_get_page() - find a page at a given offset
 * @msc:    MSC configured in SINGLE mode
 * @pgoff:  page offset
 *
 * Return:  page, if @pgoff is within the range, NULL otherwise.
 */
static struct page *msc_buffer_contig_get_page(struct msc *msc,
                           unsigned long pgoff)
{
    if (pgoff >= msc->nr_pages)
        return NULL;

    return virt_to_page(msc->base + (pgoff << PAGE_SHIFT));
}

static int __msc_buffer_win_alloc(struct msc_window *win,
                  unsigned int nr_segs)
{
    struct scatterlist *sg_ptr;
    void *block;
    int i, ret;

    ret = sg_alloc_table(win->sgt, nr_segs, GFP_KERNEL);
    if (ret)
        return -ENOMEM;

    for_each_sg(win->sgt->sgl, sg_ptr, nr_segs, i) {
        block = dma_alloc_coherent(msc_dev(win->msc)->parent->parent,
                      PAGE_SIZE, &sg_dma_address(sg_ptr),
                      GFP_KERNEL);
        if (!block)
            goto err_nomem;

        sg_set_buf(sg_ptr, block, PAGE_SIZE);
    }

    return nr_segs;

err_nomem:
    for_each_sg(win->sgt->sgl, sg_ptr, i, ret)
        dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
                  sg_virt(sg_ptr), sg_dma_address(sg_ptr));

    sg_free_table(win->sgt);

    return -ENOMEM;
}

#ifdef CONFIG_X86
static void msc_buffer_set_uc(struct msc *msc)
{
    struct scatterlist *sg_ptr;
    struct msc_window *win;
    int i;

    if (msc->mode == MSC_MODE_SINGLE) {
        set_memory_uc((unsigned long)msc->base, msc->nr_pages);
        return;
    }

    list_for_each_entry(win, &msc->win_list, entry) {
        for_each_sg(win->sgt->sgl, sg_ptr, win->nr_segs, i) {
            /* Set the page as uncached */
            set_memory_uc((unsigned long)sg_virt(sg_ptr),
                    PFN_DOWN(sg_ptr->length));
        }
    }
}

static void msc_buffer_set_wb(struct msc *msc)
{
    struct scatterlist *sg_ptr;
    struct msc_window *win;
    int i;

    if (msc->mode == MSC_MODE_SINGLE) {
        set_memory_wb((unsigned long)msc->base, msc->nr_pages);
        return;
    }

    list_for_each_entry(win, &msc->win_list, entry) {
        for_each_sg(win->sgt->sgl, sg_ptr, win->nr_segs, i) {
            /* Reset the page to write-back */
            set_memory_wb((unsigned long)sg_virt(sg_ptr),
                    PFN_DOWN(sg_ptr->length));
        }
    }
}
#else /* !X86 */
static inline void
msc_buffer_set_uc(struct msc *msc) {}
static inline void msc_buffer_set_wb(struct msc *msc) {}
#endif /* CONFIG_X86 */

static struct page *msc_sg_page(struct scatterlist *sg)
{
    void *addr = sg_virt(sg);

    if (is_vmalloc_addr(addr))
        return vmalloc_to_page(addr);

    return sg_page(sg);
}

/**
 * msc_buffer_win_alloc() - alloc a window for a multiblock mode
 * @msc:    MSC device
 * @nr_blocks:  number of pages in this window
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
 * to serialize, so the caller is expected to hold it.
 *
 * Return:  0 on success, -errno otherwise.
 */
static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks)
{
    struct msc_window *win;
    int ret = -ENOMEM;

    if (!nr_blocks)
        return 0;

    win = kzalloc(sizeof(*win), GFP_KERNEL);
    if (!win)
        return -ENOMEM;

    win->msc = msc;
    win->sgt = &win->_sgt;
    win->lockout = WIN_READY;
    spin_lock_init(&win->lo_lock);

    if (!list_empty(&msc->win_list)) {
        struct msc_window *prev = list_last_entry(&msc->win_list,
                              struct msc_window,
                              entry);

        win->pgoff = prev->pgoff + prev->nr_blocks;
    }

    if (msc->mbuf && msc->mbuf->alloc_window)
        ret = msc->mbuf->alloc_window(msc->mbuf_priv, &win->sgt,
                          nr_blocks << PAGE_SHIFT);
    else
        ret = __msc_buffer_win_alloc(win, nr_blocks);

    if (ret <= 0)
        goto err_nomem;

    win->nr_segs = ret;
    win->nr_blocks = nr_blocks;

    if (list_empty(&msc->win_list)) {
        msc->base = msc_win_base(win);
        msc->base_addr = msc_win_base_dma(win);
        msc->cur_win = win;
    }

    list_add_tail(&win->entry, &msc->win_list);
    msc->nr_pages += nr_blocks;

    return 0;

err_nomem:
    kfree(win);

    return ret;
}

static void __msc_buffer_win_free(struct msc *msc, struct msc_window *win)
{
    struct scatterlist *sg;
    int i;

    for_each_sg(win->sgt->sgl, sg, win->nr_segs, i) {
        struct page *page = msc_sg_page(sg);

        page->mapping = NULL;
        dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
                  sg_virt(sg), sg_dma_address(sg));
    }
    sg_free_table(win->sgt);
}

/**
 * msc_buffer_win_free() - free a window from MSC's window list
 * @msc:    MSC device
 * @win:    window to free
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
 * to serialize, so the caller is expected to hold it.
 */
static void msc_buffer_win_free(struct msc *msc, struct msc_window *win)
{
    msc->nr_pages -= win->nr_blocks;

    list_del(&win->entry);
    if (list_empty(&msc->win_list)) {
        msc->base = NULL;
        msc->base_addr = 0;
    }

    if (msc->mbuf && msc->mbuf->free_window)
        msc->mbuf->free_window(msc->mbuf_priv, win->sgt);
    else
        __msc_buffer_win_free(msc, win);

    kfree(win);
}

/**
 * msc_buffer_relink() - set up block descriptors for multiblock mode
 * @msc:    MSC device
 *
 * This traverses msc::win_list, which requires msc::buf_mutex to serialize,
 * so the caller is expected to hold it.
 */
static void msc_buffer_relink(struct msc *msc)
{
    struct msc_window *win, *next_win;

    /* call with msc::buf_mutex held */
    list_for_each_entry(win, &msc->win_list, entry) {
        struct scatterlist *sg;
        unsigned int blk;
        u32 sw_tag = 0;

        /*
         * Last window's next_win should point to the first window
         * and MSC_SW_TAG_LASTWIN should be set.
         */
        if (msc_is_last_win(win)) {
            sw_tag |= MSC_SW_TAG_LASTWIN;
            next_win = list_first_entry(&msc->win_list,
                            struct msc_window, entry);
        } else {
            next_win = list_next_entry(win, entry);
        }

        for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
            struct msc_block_desc *bdesc = sg_virt(sg);

            memset(bdesc, 0, sizeof(*bdesc));

            bdesc->next_win = msc_win_base_pfn(next_win);

            /*
             * Similarly to last window, last block should point
             * to the first one.
             */
            if (blk == win->nr_segs - 1) {
                sw_tag |= MSC_SW_TAG_LASTBLK;
                bdesc->next_blk = msc_win_base_pfn(win);
            } else {
                dma_addr_t addr = sg_dma_address(sg_next(sg));

                bdesc->next_blk = PFN_DOWN(addr);
            }

            bdesc->sw_tag = sw_tag;
            bdesc->block_sz = sg->length / 64;
        }
    }

    /*
     * Make the above writes globally visible before tracing is
     * enabled to make sure hardware sees them coherently.
     */
    wmb();
}
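
/*
 * Worked example of the linkage msc_buffer_relink() builds, for two
 * windows of two blocks each (PFN values are illustrative):
 *
 *	win0.blk0: next_blk = pfn(win0.blk1), next_win = pfn(win1.blk0)
 *	win0.blk1: next_blk = pfn(win0.blk0), next_win = pfn(win1.blk0),
 *		   sw_tag = LASTBLK
 *	win1.blk0: next_blk = pfn(win1.blk1), next_win = pfn(win0.blk0),
 *		   sw_tag = LASTWIN
 *	win1.blk1: next_blk = pfn(win1.blk0), next_win = pfn(win0.blk0),
 *		   sw_tag = LASTWIN | LASTBLK
 *
 * Note that every block of the last window carries LASTWIN, since sw_tag
 * accumulates per window. block_sz is in 64-byte units, so one 4k block
 * has block_sz == 64.
 */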

static void msc_buffer_multi_free(struct msc *msc)
{
    struct msc_window *win, *iter;

    list_for_each_entry_safe(win, iter, &msc->win_list, entry)
        msc_buffer_win_free(msc, win);
}

static int msc_buffer_multi_alloc(struct msc *msc, unsigned long *nr_pages,
                  unsigned int nr_wins)
{
    int ret, i;

    for (i = 0; i < nr_wins; i++) {
        ret = msc_buffer_win_alloc(msc, nr_pages[i]);
        if (ret) {
            msc_buffer_multi_free(msc);
            return ret;
        }
    }

    msc_buffer_relink(msc);

    return 0;
}

/**
 * msc_buffer_free() - free buffers for MSC
 * @msc:    MSC device
 *
 * Free MSC's storage buffers.
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex to
 * serialize, so the caller is expected to hold it.
 */
static void msc_buffer_free(struct msc *msc)
{
    msc_buffer_set_wb(msc);

    if (msc->mode == MSC_MODE_SINGLE)
        msc_buffer_contig_free(msc);
    else if (msc->mode == MSC_MODE_MULTI)
        msc_buffer_multi_free(msc);
}

/**
 * msc_buffer_alloc() - allocate a buffer for MSC
 * @msc:    MSC device
 * @nr_pages:   array of window sizes, in pages
 * @nr_wins:    number of windows in @nr_pages
 *
 * Allocate a storage buffer for MSC; depending on msc::mode, this is done
 * either via msc_buffer_contig_alloc() for SINGLE operation mode or
 * msc_buffer_multi_alloc() for multiblock operation, which allocates one
 * window per @nr_pages entry.
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
 * to serialize, so the caller is expected to hold it.
 *
 * Return:  0 on success, -errno otherwise.
 */
static int msc_buffer_alloc(struct msc *msc, unsigned long *nr_pages,
                unsigned int nr_wins)
{
    int ret;

    /* -1: buffer not allocated */
    if (atomic_read(&msc->user_count) != -1)
        return -EBUSY;

    if (msc->mode == MSC_MODE_SINGLE) {
        if (nr_wins != 1)
            return -EINVAL;

        ret = msc_buffer_contig_alloc(msc, nr_pages[0] << PAGE_SHIFT);
    } else if (msc->mode == MSC_MODE_MULTI) {
        ret = msc_buffer_multi_alloc(msc, nr_pages, nr_wins);
    } else {
        ret = -EINVAL;
    }

    if (!ret) {
        msc_buffer_set_uc(msc);

        /* allocation should be visible before the counter goes to 0 */
        smp_mb__before_atomic();

        if (WARN_ON_ONCE(atomic_cmpxchg(&msc->user_count, -1, 0) != -1))
            return -EINVAL;
    }

    return ret;
}

/**
 * msc_buffer_unlocked_free_unless_used() - free a buffer unless it's in use
 * @msc:    MSC device
 *
 * This will free MSC buffer unless it is in use or there is no allocated
 * buffer.
 * Caller needs to hold msc::buf_mutex.
 *
 * Return:  0 on successful deallocation or if there was no buffer to
 *      deallocate, -EBUSY if there are active users.
 */
static int msc_buffer_unlocked_free_unless_used(struct msc *msc)
{
    int count, ret = 0;

    count = atomic_cmpxchg(&msc->user_count, 0, -1);

    /* > 0: buffer is allocated and has users */
    if (count > 0)
        ret = -EBUSY;
    /* 0: buffer is allocated, no users */
    else if (!count)
        msc_buffer_free(msc);
    /* < 0: no buffer, nothing to do */

    return ret;
}
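
/*
 * The msc::user_count lifecycle, in terms of the atomics used in this
 * file (a summary of existing behavior, not additional API):
 *
 *	atomic_set(&msc->user_count, -1);		// init: no buffer
 *	atomic_cmpxchg(&msc->user_count, -1, 0);	// alloc: -1 -> 0
 *	atomic_inc_unless_negative(&msc->user_count);	// activate/read/mmap
 *	atomic_dec(&msc->user_count);			// done using
 *	atomic_cmpxchg(&msc->user_count, 0, -1);	// free: 0 -> -1
 *
 * A window held in WIN_LOCKED also pins the buffer via the inc/dec in
 * msc_win_set_lockout().
 */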

/**
 * msc_buffer_free_unless_used() - free a buffer unless it's in use
 * @msc:    MSC device
 *
 * This is a locked version of msc_buffer_unlocked_free_unless_used().
 */
static int msc_buffer_free_unless_used(struct msc *msc)
{
    int ret;

    mutex_lock(&msc->buf_mutex);
    ret = msc_buffer_unlocked_free_unless_used(msc);
    mutex_unlock(&msc->buf_mutex);

    return ret;
}

/**
 * msc_buffer_get_page() - get MSC buffer page at a given offset
 * @msc:    MSC device
 * @pgoff:  page offset into the storage buffer
 *
 * This traverses msc::win_list, so holding msc::buf_mutex is expected from
 * the caller.
 *
 * Return:  page if @pgoff corresponds to a valid buffer page or NULL.
 */
static struct page *msc_buffer_get_page(struct msc *msc, unsigned long pgoff)
{
    struct msc_window *win;
    struct scatterlist *sg;
    unsigned int blk;

    if (msc->mode == MSC_MODE_SINGLE)
        return msc_buffer_contig_get_page(msc, pgoff);

    list_for_each_entry(win, &msc->win_list, entry)
        if (pgoff >= win->pgoff && pgoff < win->pgoff + win->nr_blocks)
            goto found;

    return NULL;

found:
    pgoff -= win->pgoff;

    for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
        struct page *page = msc_sg_page(sg);
        size_t pgsz = PFN_DOWN(sg->length);

        if (pgoff < pgsz)
            return page + pgoff;

        pgoff -= pgsz;
    }

    return NULL;
}

/**
 * struct msc_win_to_user_struct - data for copy_to_user() callback
 * @buf:    userspace buffer to copy data to
 * @offset: running offset
 */
struct msc_win_to_user_struct {
    char __user *buf;
    unsigned long   offset;
};

/**
 * msc_win_to_user() - iterator for msc_buffer_iterate() to copy data to user
 * @data:   callback's private data
 * @src:    source buffer
 * @len:    amount of data to copy from the source buffer
 */
static unsigned long msc_win_to_user(void *data, void *src, size_t len)
{
    struct msc_win_to_user_struct *u = data;
    unsigned long ret;

    ret = copy_to_user(u->buf + u->offset, src, len);
    u->offset += len - ret;

    return ret;
}

/*
 * file operations' callbacks
 */

static int intel_th_msc_open(struct inode *inode, struct file *file)
{
    struct intel_th_device *thdev = file->private_data;
    struct msc *msc = dev_get_drvdata(&thdev->dev);
    struct msc_iter *iter;

    if (!capable(CAP_SYS_RAWIO))
        return -EPERM;

    iter = msc_iter_install(msc);
    if (IS_ERR(iter))
        return PTR_ERR(iter);

    file->private_data = iter;

    return nonseekable_open(inode, file);
}

static int intel_th_msc_release(struct inode *inode, struct file *file)
{
    struct msc_iter *iter = file->private_data;
    struct msc *msc = iter->msc;

    msc_iter_remove(iter, msc);

    return 0;
}

static ssize_t
msc_single_to_user(struct msc *msc, char __user *buf, loff_t off, size_t len)
{
    unsigned long size = msc->nr_pages << PAGE_SHIFT, rem = len;
    unsigned long start = off, tocopy = 0;

    if (msc->single_wrap) {
        start += msc->single_sz;
        if (start < size) {
            tocopy = min(rem, size - start);
            if (copy_to_user(buf, msc->base + start, tocopy))
                return -EFAULT;

            buf += tocopy;
            rem -= tocopy;
            start += tocopy;
        }

        start &= size - 1;
        if (rem) {
            tocopy = min(rem, msc->single_sz - start);
            if (copy_to_user(buf, msc->base + start, tocopy))
                return -EFAULT;

            rem -= tocopy;
        }

        return len - rem;
    }

    if (copy_to_user(buf, msc->base + start, rem))
        return -EFAULT;

    return len;
}
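
/*
 * Worked example for the wrapped case above: with a one-page buffer
 * (size == 4096) where the write pointer wrapped at single_sz == 1024,
 * the oldest data lives at [1024..4096) and the newest at [0..1024).
 * A read at off == 0 therefore copies, in order:
 *
 *	start = 0 + 1024;			// skip to the oldest data
 *	copy_to_user(buf, base + 1024, min(len, 4096 - 1024));
 *	start &= 4096 - 1;			// wrap around to 0
 *	copy_to_user(buf, base + 0, min(rem, 1024));
 */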

static ssize_t intel_th_msc_read(struct file *file, char __user *buf,
                 size_t len, loff_t *ppos)
{
    struct msc_iter *iter = file->private_data;
    struct msc *msc = iter->msc;
    size_t size;
    loff_t off = *ppos;
    ssize_t ret = 0;

    if (!atomic_inc_unless_negative(&msc->user_count))
        return 0;

    if (msc->mode == MSC_MODE_SINGLE && !msc->single_wrap)
        size = msc->single_sz;
    else
        size = msc->nr_pages << PAGE_SHIFT;

    if (!size)
        goto put_count;

    if (off >= size)
        goto put_count;

    if (off + len >= size)
        len = size - off;

    if (msc->mode == MSC_MODE_SINGLE) {
        ret = msc_single_to_user(msc, buf, off, len);
        if (ret >= 0)
            *ppos += ret;
    } else if (msc->mode == MSC_MODE_MULTI) {
        struct msc_win_to_user_struct u = {
            .buf    = buf,
            .offset = 0,
        };

        ret = msc_buffer_iterate(iter, len, &u, msc_win_to_user);
        if (ret >= 0)
            *ppos = iter->offset;
    } else {
        ret = -EINVAL;
    }

put_count:
    atomic_dec(&msc->user_count);

    return ret;
}

/*
 * vm operations callbacks (vm_ops)
 */

static void msc_mmap_open(struct vm_area_struct *vma)
{
    struct msc_iter *iter = vma->vm_file->private_data;
    struct msc *msc = iter->msc;

    atomic_inc(&msc->mmap_count);
}

static void msc_mmap_close(struct vm_area_struct *vma)
{
    struct msc_iter *iter = vma->vm_file->private_data;
    struct msc *msc = iter->msc;
    unsigned long pg;

    if (!atomic_dec_and_mutex_lock(&msc->mmap_count, &msc->buf_mutex))
        return;

    /* drop page _refcounts */
    for (pg = 0; pg < msc->nr_pages; pg++) {
        struct page *page = msc_buffer_get_page(msc, pg);

        if (WARN_ON_ONCE(!page))
            continue;

        if (page->mapping)
            page->mapping = NULL;
    }

    /* last mapping -- drop user_count */
    atomic_dec(&msc->user_count);
    mutex_unlock(&msc->buf_mutex);
}

static vm_fault_t msc_mmap_fault(struct vm_fault *vmf)
{
    struct msc_iter *iter = vmf->vma->vm_file->private_data;
    struct msc *msc = iter->msc;

    vmf->page = msc_buffer_get_page(msc, vmf->pgoff);
    if (!vmf->page)
        return VM_FAULT_SIGBUS;

    get_page(vmf->page);
    vmf->page->mapping = vmf->vma->vm_file->f_mapping;
    vmf->page->index = vmf->pgoff;

    return 0;
}

static const struct vm_operations_struct msc_mmap_ops = {
    .open   = msc_mmap_open,
    .close  = msc_mmap_close,
    .fault  = msc_mmap_fault,
};

static int intel_th_msc_mmap(struct file *file, struct vm_area_struct *vma)
{
    unsigned long size = vma->vm_end - vma->vm_start;
    struct msc_iter *iter = vma->vm_file->private_data;
    struct msc *msc = iter->msc;
    int ret = -EINVAL;

    if (!size || offset_in_page(size))
        return -EINVAL;

    if (vma->vm_pgoff)
        return -EINVAL;

    /* grab user_count once per mmap; drop in msc_mmap_close() */
    if (!atomic_inc_unless_negative(&msc->user_count))
        return -EINVAL;

    if (msc->mode != MSC_MODE_SINGLE &&
        msc->mode != MSC_MODE_MULTI)
        goto out;

    if (size >> PAGE_SHIFT != msc->nr_pages)
        goto out;

    atomic_set(&msc->mmap_count, 1);
    ret = 0;

out:
    if (ret)
        atomic_dec(&msc->user_count);

    vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
    vma->vm_flags |= VM_DONTEXPAND | VM_DONTCOPY;
    vma->vm_ops = &msc_mmap_ops;
    return ret;
}

static const struct file_operations intel_th_msc_fops = {
    .open       = intel_th_msc_open,
    .release    = intel_th_msc_release,
    .read       = intel_th_msc_read,
    .mmap       = intel_th_msc_mmap,
    .llseek     = no_llseek,
    .owner      = THIS_MODULE,
};
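
/*
 * Userspace consumes the trace through this character device, e.g.
 * (a sketch; the "/dev/intel_th0/msc0" node name is an assumption that
 * depends on the platform's Trace Hub instance, and consume() is a
 * hypothetical helper):
 *
 *	int fd = open("/dev/intel_th0/msc0", O_RDONLY);
 *	char chunk[4096];
 *	ssize_t n;
 *
 *	while ((n = read(fd, chunk, sizeof(chunk))) > 0)
 *		consume(chunk, n);
 *	close(fd);
 *
 * open() requires CAP_SYS_RAWIO and fails with -EBUSY while the MSC is
 * enabled (see msc_iter_install()).
 */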
1675 
1676 static void intel_th_msc_wait_empty(struct intel_th_device *thdev)
1677 {
1678     struct msc *msc = dev_get_drvdata(&thdev->dev);
1679     unsigned long count;
1680     u32 reg;
1681 
1682     for (reg = 0, count = MSC_PLE_WAITLOOP_DEPTH;
1683          count && !(reg & MSCSTS_PLE); count--) {
1684         reg = __raw_readl(msc->reg_base + REG_MSU_MSC0STS);
1685         cpu_relax();
1686     }
1687 
1688     if (!count)
1689         dev_dbg(msc_dev(msc), "timeout waiting for MSC0 PLE\n");
1690 }
1691 
1692 static int intel_th_msc_init(struct msc *msc)
1693 {
1694     atomic_set(&msc->user_count, -1);
1695 
1696     msc->mode = msc->multi_is_broken ? MSC_MODE_SINGLE : MSC_MODE_MULTI;
1697     mutex_init(&msc->buf_mutex);
1698     INIT_LIST_HEAD(&msc->win_list);
1699     INIT_LIST_HEAD(&msc->iter_list);
1700 
1701     msc->burst_len =
1702         (ioread32(msc->reg_base + REG_MSU_MSC0CTL) & MSC_LEN) >>
1703         __ffs(MSC_LEN);
1704 
1705     return 0;
1706 }
1707 
1708 static int msc_win_switch(struct msc *msc)
1709 {
1710     struct msc_window *first;
1711 
1712     if (list_empty(&msc->win_list))
1713         return -EINVAL;
1714 
1715     first = list_first_entry(&msc->win_list, struct msc_window, entry);
1716 
1717     if (msc_is_last_win(msc->cur_win))
1718         msc->cur_win = first;
1719     else
1720         msc->cur_win = list_next_entry(msc->cur_win, entry);
1721 
1722     msc->base = msc_win_base(msc->cur_win);
1723     msc->base_addr = msc_win_base_dma(msc->cur_win);
1724 
1725     intel_th_trace_switch(msc->thdev);
1726 
1727     return 0;
1728 }
1729 
1730 /**
1731  * intel_th_msc_window_unlock - put the window back in rotation
1732  * @dev:    MSC device to which this relates
1733  * @sgt:    buffer's sg_table for the window, does nothing if NULL
1734  */
1735 void intel_th_msc_window_unlock(struct device *dev, struct sg_table *sgt)
1736 {
1737     struct msc *msc = dev_get_drvdata(dev);
1738     struct msc_window *win;
1739 
1740     if (!sgt)
1741         return;
1742 
1743     win = msc_find_window(msc, sgt, false);
1744     if (!win)
1745         return;
1746 
1747     msc_win_set_lockout(win, WIN_LOCKED, WIN_READY);
1748     if (msc->switch_on_unlock == win) {
1749         msc->switch_on_unlock = NULL;
1750         msc_win_switch(msc);
1751     }
1752 }
1753 EXPORT_SYMBOL_GPL(intel_th_msc_window_unlock);
1754 
1755 static void msc_work(struct work_struct *work)
1756 {
1757     struct msc *msc = container_of(work, struct msc, work);
1758 
1759     intel_th_msc_deactivate(msc->thdev);
1760 }
1761 
1762 static irqreturn_t intel_th_msc_interrupt(struct intel_th_device *thdev)
1763 {
1764     struct msc *msc = dev_get_drvdata(&thdev->dev);
1765     u32 msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS);
1766     u32 mask = msc->index ? MSUSTS_MSC1BLAST : MSUSTS_MSC0BLAST;
1767     struct msc_window *win, *next_win;
1768 
1769     if (!msc->do_irq || !msc->mbuf)
1770         return IRQ_NONE;
1771 
1772     msusts &= mask;
1773 
1774     if (!msusts)
1775         return msc->enabled ? IRQ_HANDLED : IRQ_NONE;
1776 
1777     iowrite32(msusts, msc->msu_base + REG_MSU_MSUSTS);
1778 
1779     if (!msc->enabled)
1780         return IRQ_NONE;
1781 
1782     /* grab the window before we do the switch */
1783     win = msc->cur_win;
1784     if (!win)
1785         return IRQ_HANDLED;
1786     next_win = msc_next_window(win);
1787     if (!next_win)
1788         return IRQ_HANDLED;
1789 
1790     /* next window: if READY, proceed, if LOCKED, stop the trace */
1791     if (msc_win_set_lockout(next_win, WIN_READY, WIN_INUSE)) {
1792         if (msc->stop_on_full)
1793             schedule_work(&msc->work);
1794         else
1795             msc->switch_on_unlock = next_win;
1796 
1797         return IRQ_HANDLED;
1798     }
1799 
1800     /* current window: INUSE -> LOCKED */
1801     msc_win_set_lockout(win, WIN_INUSE, WIN_LOCKED);
1802 
1803     msc_win_switch(msc);
1804 
1805     if (msc->mbuf && msc->mbuf->ready)
1806         msc->mbuf->ready(msc->mbuf_priv, win->sgt,
1807                  msc_win_total_sz(win));
1808 
1809     return IRQ_HANDLED;
1810 }
1811 
1812 static const char * const msc_mode[] = {
1813     [MSC_MODE_SINGLE]   = "single",
1814     [MSC_MODE_MULTI]    = "multi",
1815     [MSC_MODE_EXI]      = "ExI",
1816     [MSC_MODE_DEBUG]    = "debug",
1817 };
1818 
1819 static ssize_t
1820 wrap_show(struct device *dev, struct device_attribute *attr, char *buf)
1821 {
1822     struct msc *msc = dev_get_drvdata(dev);
1823 
1824     return scnprintf(buf, PAGE_SIZE, "%d\n", msc->wrap);
1825 }
1826 
1827 static ssize_t
1828 wrap_store(struct device *dev, struct device_attribute *attr, const char *buf,
1829        size_t size)
1830 {
1831     struct msc *msc = dev_get_drvdata(dev);
1832     unsigned long val;
1833     int ret;
1834 
1835     ret = kstrtoul(buf, 10, &val);
1836     if (ret)
1837         return ret;
1838 
1839     msc->wrap = !!val;
1840 
1841     return size;
1842 }
1843 
1844 static DEVICE_ATTR_RW(wrap);
1845 
1846 static void msc_buffer_unassign(struct msc *msc)
1847 {
1848     lockdep_assert_held(&msc->buf_mutex);
1849 
1850     if (!msc->mbuf)
1851         return;
1852 
1853     msc->mbuf->unassign(msc->mbuf_priv);
1854     msu_buffer_put(msc->mbuf);
1855     msc->mbuf_priv = NULL;
1856     msc->mbuf = NULL;
1857 }
1858 
1859 static ssize_t
1860 mode_show(struct device *dev, struct device_attribute *attr, char *buf)
1861 {
1862     struct msc *msc = dev_get_drvdata(dev);
1863     const char *mode = msc_mode[msc->mode];
1864     ssize_t ret;
1865 
1866     mutex_lock(&msc->buf_mutex);
1867     if (msc->mbuf)
1868         mode = msc->mbuf->name;
1869     ret = scnprintf(buf, PAGE_SIZE, "%s\n", mode);
1870     mutex_unlock(&msc->buf_mutex);
1871 
1872     return ret;
1873 }
1874 
1875 static ssize_t
1876 mode_store(struct device *dev, struct device_attribute *attr, const char *buf,
1877        size_t size)
1878 {
1879     const struct msu_buffer *mbuf = NULL;
1880     struct msc *msc = dev_get_drvdata(dev);
1881     size_t len = size;
1882     char *cp, *mode;
1883     int i, ret;
1884 
1885     if (!capable(CAP_SYS_RAWIO))
1886         return -EPERM;
1887 
1888     cp = memchr(buf, '\n', len);
1889     if (cp)
1890         len = cp - buf;
1891 
1892     mode = kstrndup(buf, len, GFP_KERNEL);
1893     if (!mode)
1894         return -ENOMEM;
1895 
1896     i = match_string(msc_mode, ARRAY_SIZE(msc_mode), mode);
1897     if (i >= 0) {
1898         kfree(mode);
1899         goto found;
1900     }
1901 
1902     /* Buffer sinks only work with a usable IRQ */
1903     if (!msc->do_irq) {
1904         kfree(mode);
1905         return -EINVAL;
1906     }
1907 
1908     mbuf = msu_buffer_get(mode);
1909     kfree(mode);
1910     if (mbuf)
1911         goto found;
1912 
1913     return -EINVAL;
1914 
1915 found:
1916     if (i == MSC_MODE_MULTI && msc->multi_is_broken)
1917         return -EOPNOTSUPP;
1918 
1919     mutex_lock(&msc->buf_mutex);
1920     ret = 0;
1921 
1922     /* Same buffer: do nothing */
1923     if (mbuf && mbuf == msc->mbuf) {
1924         /* put the extra reference we just got */
1925         msu_buffer_put(mbuf);
1926         goto unlock;
1927     }
1928 
1929     ret = msc_buffer_unlocked_free_unless_used(msc);
1930     if (ret)
1931         goto unlock;
1932 
1933     if (mbuf) {
1934         void *mbuf_priv = mbuf->assign(dev, &i);
1935 
1936         if (!mbuf_priv) {
1937             ret = -ENOMEM;
1938             goto unlock;
1939         }
1940 
1941         msc_buffer_unassign(msc);
1942         msc->mbuf_priv = mbuf_priv;
1943         msc->mbuf = mbuf;
1944     } else {
1945         msc_buffer_unassign(msc);
1946     }
1947 
1948     msc->mode = i;
1949 
1950 unlock:
1951     if (ret && mbuf)
1952         msu_buffer_put(mbuf);
1953     mutex_unlock(&msc->buf_mutex);
1954 
1955     return ret ? ret : size;
1956 }
1957 
1958 static DEVICE_ATTR_RW(mode);
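/*
 * Userspace sketch (not part of this driver): a mode is selected by
 * writing one of the msc_mode[] names above, or the name of a
 * registered buffer sink, to the "mode" attribute. The sysfs path
 * below assumes the first MSC of Trace Hub 0; the actual path depends
 * on the platform. Writing requires CAP_SYS_RAWIO.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
    const char *attr = "/sys/bus/intel_th/devices/0-msc0/mode";
    const char *mode = "multi";     /* or "single", or a sink's name */
    int fd = open(attr, O_WRONLY);

    if (fd < 0) {
        perror("open");
        return 1;
    }
    if (write(fd, mode, strlen(mode)) < 0)
        perror("write");
    close(fd);

    return 0;
}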
1959 
1960 static ssize_t
1961 nr_pages_show(struct device *dev, struct device_attribute *attr, char *buf)
1962 {
1963     struct msc *msc = dev_get_drvdata(dev);
1964     struct msc_window *win;
1965     size_t count = 0;
1966 
1967     mutex_lock(&msc->buf_mutex);
1968 
1969     if (msc->mode == MSC_MODE_SINGLE)
1970         count = scnprintf(buf, PAGE_SIZE, "%ld\n", msc->nr_pages);
1971     else if (msc->mode == MSC_MODE_MULTI) {
1972         list_for_each_entry(win, &msc->win_list, entry) {
1973             count += scnprintf(buf + count, PAGE_SIZE - count,
1974                        "%d%c", win->nr_blocks,
1975                        msc_is_last_win(win) ? '\n' : ',');
1976         }
1977     } else {
1978         count = scnprintf(buf, PAGE_SIZE, "unsupported\n");
1979     }
1980 
1981     mutex_unlock(&msc->buf_mutex);
1982 
1983     return count;
1984 }
1985 
1986 static ssize_t
1987 nr_pages_store(struct device *dev, struct device_attribute *attr,
1988            const char *buf, size_t size)
1989 {
1990     struct msc *msc = dev_get_drvdata(dev);
1991     unsigned long val, *win = NULL, *rewin;
1992     size_t len = size;
1993     const char *p = buf;
1994     char *end, *s;
1995     int ret, nr_wins = 0;
1996 
1997     if (!capable(CAP_SYS_RAWIO))
1998         return -EPERM;
1999 
2000     ret = msc_buffer_free_unless_used(msc);
2001     if (ret)
2002         return ret;
2003 
2004     /* scan the comma-separated list of allocation sizes */
2005     end = memchr(buf, '\n', len);
2006     if (end)
2007         len = end - buf;
2008 
2009     do {
2010         end = memchr(p, ',', len);
2011         s = kstrndup(p, end ? end - p : len, GFP_KERNEL);
2012         if (!s) {
2013             ret = -ENOMEM;
2014             goto free_win;
2015         }
2016 
2017         ret = kstrtoul(s, 10, &val);
2018         kfree(s);
2019 
2020         if (ret || !val)
2021             goto free_win;
2022 
2023         if (nr_wins && msc->mode == MSC_MODE_SINGLE) {
2024             ret = -EINVAL;
2025             goto free_win;
2026         }
2027 
2028         nr_wins++;
2029         rewin = krealloc_array(win, nr_wins, sizeof(*win), GFP_KERNEL);
2030         if (!rewin) {
2031             kfree(win);
2032             return -ENOMEM;
2033         }
2034 
2035         win = rewin;
2036         win[nr_wins - 1] = val;
2037 
2038         if (!end)
2039             break;
2040 
2041         /* consume the number and the following comma, hence +1 */
2042         len -= end - p + 1;
2043         p = end + 1;
2044     } while (len);
2045 
2046     mutex_lock(&msc->buf_mutex);
2047     ret = msc_buffer_alloc(msc, win, nr_wins);
2048     mutex_unlock(&msc->buf_mutex);
2049 
2050 free_win:
2051     kfree(win);
2052 
2053     return ret ? ret : size;
2054 }
2055 
2056 static DEVICE_ATTR_RW(nr_pages);
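/*
 * The accepted input is a comma-separated list of per-window sizes in
 * pages, e.g. "64,64,128" for three windows in "multi" mode; "single"
 * mode takes exactly one value. A userspace mirror of the parsing loop
 * above (illustrative only, not part of this driver):
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    char input[] = "64,64,128";     /* as written to "nr_pages" */
    char *p = input, *end;
    unsigned long val;

    while (*p) {
        val = strtoul(p, &end, 10);
        if (end == p || !val)
            return 1;               /* empty or zero-sized window */
        printf("window of %lu pages\n", val);
        if (*end == ',')
            end++;                  /* consume the separator */
        else if (*end)
            return 1;               /* trailing garbage */
        p = end;
    }

    return 0;
}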
2057 
2058 static ssize_t
2059 win_switch_store(struct device *dev, struct device_attribute *attr,
2060          const char *buf, size_t size)
2061 {
2062     struct msc *msc = dev_get_drvdata(dev);
2063     unsigned long val;
2064     int ret;
2065 
2066     ret = kstrtoul(buf, 10, &val);
2067     if (ret)
2068         return ret;
2069 
2070     if (val != 1)
2071         return -EINVAL;
2072 
2073     ret = -EINVAL;
2074     mutex_lock(&msc->buf_mutex);
2075     /*
2076      * Window switch can only happen in the "multi" mode.
2077      * If an external buffer is engaged, it has full
2078      * control over window switching.
2079      */
2080     if (msc->mode == MSC_MODE_MULTI && !msc->mbuf)
2081         ret = msc_win_switch(msc);
2082     mutex_unlock(&msc->buf_mutex);
2083 
2084     return ret ? ret : size;
2085 }
2086 
2087 static DEVICE_ATTR_WO(win_switch);
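/*
 * Userspace sketch: only the exact value 1 triggers a switch, and only
 * in "multi" mode with no buffer sink attached. Same hypothetical
 * sysfs layout as in the "mode" example above.
 */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
    int fd = open("/sys/bus/intel_th/devices/0-msc0/win_switch", O_WRONLY);

    if (fd < 0)
        return 1;
    if (write(fd, "1", 1) != 1) {
        close(fd);
        return 1;
    }
    close(fd);

    return 0;
}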
2088 
2089 static ssize_t stop_on_full_show(struct device *dev,
2090                  struct device_attribute *attr, char *buf)
2091 {
2092     struct msc *msc = dev_get_drvdata(dev);
2093 
2094     return sprintf(buf, "%d\n", msc->stop_on_full);
2095 }
2096 
2097 static ssize_t stop_on_full_store(struct device *dev,
2098                   struct device_attribute *attr,
2099                   const char *buf, size_t size)
2100 {
2101     struct msc *msc = dev_get_drvdata(dev);
2102     int ret;
2103 
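    /* kstrtobool() accepts "1"/"0", "y"/"n" and "on"/"off" */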
2104     ret = kstrtobool(buf, &msc->stop_on_full);
2105     if (ret)
2106         return ret;
2107 
2108     return size;
2109 }
2110 
2111 static DEVICE_ATTR_RW(stop_on_full);
2112 
2113 static struct attribute *msc_output_attrs[] = {
2114     &dev_attr_wrap.attr,
2115     &dev_attr_mode.attr,
2116     &dev_attr_nr_pages.attr,
2117     &dev_attr_win_switch.attr,
2118     &dev_attr_stop_on_full.attr,
2119     NULL,
2120 };
2121 
2122 static const struct attribute_group msc_output_group = {
2123     .attrs  = msc_output_attrs,
2124 };
2125 
2126 static int intel_th_msc_probe(struct intel_th_device *thdev)
2127 {
2128     struct device *dev = &thdev->dev;
2129     struct resource *res;
2130     struct msc *msc;
2131     void __iomem *base;
2132     int err;
2133 
2134     res = intel_th_device_get_resource(thdev, IORESOURCE_MEM, 0);
2135     if (!res)
2136         return -ENODEV;
2137 
2138     base = devm_ioremap(dev, res->start, resource_size(res));
2139     if (!base)
2140         return -ENOMEM;
2141 
2142     msc = devm_kzalloc(dev, sizeof(*msc), GFP_KERNEL);
2143     if (!msc)
2144         return -ENOMEM;
2145 
2146     res = intel_th_device_get_resource(thdev, IORESOURCE_IRQ, 1);
2147     if (!res)
2148         msc->do_irq = 1;
2149 
2150     if (INTEL_TH_CAP(to_intel_th(thdev), multi_is_broken))
2151         msc->multi_is_broken = 1;
2152 
2153     msc->index = thdev->id;
2154 
2155     msc->thdev = thdev;
2156     msc->reg_base = base + msc->index * 0x100;
2157     msc->msu_base = base;
2158 
2159     INIT_WORK(&msc->work, msc_work);
2160     err = intel_th_msc_init(msc);
2161     if (err)
2162         return err;
2163 
2164     dev_set_drvdata(dev, msc);
2165 
2166     return 0;
2167 }
2168 
2169 static void intel_th_msc_remove(struct intel_th_device *thdev)
2170 {
2171     struct msc *msc = dev_get_drvdata(&thdev->dev);
2172     int ret;
2173 
2174     intel_th_msc_deactivate(thdev);
2175 
2176     /*
2177      * Buffers should not be in use at this point, unless the
2178      * output character device is still open and the parent
2179      * device gets detached from its bus, which is a FIXME.
2180      */
2181     ret = msc_buffer_free_unless_used(msc);
2182     WARN_ON_ONCE(ret);
2183 }
2184 
2185 static struct intel_th_driver intel_th_msc_driver = {
2186     .probe  = intel_th_msc_probe,
2187     .remove = intel_th_msc_remove,
2188     .irq        = intel_th_msc_interrupt,
2189     .wait_empty = intel_th_msc_wait_empty,
2190     .activate   = intel_th_msc_activate,
2191     .deactivate = intel_th_msc_deactivate,
2192     .fops   = &intel_th_msc_fops,
2193     .attr_group = &msc_output_group,
2194     .driver = {
2195         .name   = "msc",
2196         .owner  = THIS_MODULE,
2197     },
2198 };
2199 
2200 module_driver(intel_th_msc_driver,
2201           intel_th_driver_register,
2202           intel_th_driver_unregister);
2203 
2204 MODULE_LICENSE("GPL v2");
2205 MODULE_DESCRIPTION("Intel(R) Trace Hub Memory Storage Unit driver");
2206 MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>");