// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *  Copyright (c) by Takashi Iwai <tiwai@suse.de>
 *
 *  EMU10K1 memory page allocation (PTB area)
 */

#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/time.h>
#include <linux/mutex.h>
#include <linux/export.h>

#include <sound/core.h>
#include <sound/emu10k1.h>

/* the page arguments of these two macros are Emu pages (4096 bytes),
 * not the aligned kernel pages used elsewhere
 */
#define __set_ptb_entry(emu,page,addr) \
    (((__le32 *)(emu)->ptb_pages.area)[page] = \
     cpu_to_le32(((addr) << (emu->address_mode)) | (page)))
#define __get_ptb_entry(emu, page) \
    (le32_to_cpu(((__le32 *)(emu)->ptb_pages.area)[page]))

#define UNIT_PAGES      (PAGE_SIZE / EMUPAGESIZE)
#define MAX_ALIGN_PAGES0        (MAXPAGES0 / UNIT_PAGES)
#define MAX_ALIGN_PAGES1        (MAXPAGES1 / UNIT_PAGES)
/* get aligned page from offset address */
#define get_aligned_page(offset)    ((offset) >> PAGE_SHIFT)
/* get offset address from aligned page */
#define aligned_page_offset(page)   ((page) << PAGE_SHIFT)

#if PAGE_SIZE == EMUPAGESIZE && !IS_ENABLED(CONFIG_DYNAMIC_DEBUG)
/* fill the PTB entry (or entries) corresponding to page with addr */
#define set_ptb_entry(emu,page,addr)    __set_ptb_entry(emu,page,addr)
/* fill the PTB entry (or entries) corresponding to page with the silent page pointer */
#define set_silent_ptb(emu,page)    __set_ptb_entry(emu,page,emu->silent_page.addr)
#else
/* fill PTB entries -- we need to fill UNIT_PAGES entries */
static inline void set_ptb_entry(struct snd_emu10k1 *emu, int page, dma_addr_t addr)
{
    int i;
    page *= UNIT_PAGES;
    for (i = 0; i < UNIT_PAGES; i++, page++) {
        __set_ptb_entry(emu, page, addr);
        dev_dbg(emu->card->dev, "mapped page %d to entry %.8x\n", page,
            (unsigned int)__get_ptb_entry(emu, page));
        addr += EMUPAGESIZE;
    }
}
static inline void set_silent_ptb(struct snd_emu10k1 *emu, int page)
{
    int i;
    page *= UNIT_PAGES;
    for (i = 0; i < UNIT_PAGES; i++, page++) {
        /* do not advance the address -- every entry points at the silent page */
        __set_ptb_entry(emu, page, emu->silent_page.addr);
        dev_dbg(emu->card->dev, "mapped silent page %d to entry %.8x\n",
            page, (unsigned int)__get_ptb_entry(emu, page));
    }
}
#endif /* PAGE_SIZE */


/*
 */
static int synth_alloc_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);
static int synth_free_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);

#define get_emu10k1_memblk(l,member)    list_entry(l, struct snd_emu10k1_memblk, member)


/* initialize the emu10k1-specific part of the memory block */
static void emu10k1_memblk_init(struct snd_emu10k1_memblk *blk)
{
    blk->mapped_page = -1;
    INIT_LIST_HEAD(&blk->mapped_link);
    INIT_LIST_HEAD(&blk->mapped_order_link);
    blk->map_locked = 0;

    blk->first_page = get_aligned_page(blk->mem.offset);
    blk->last_page = get_aligned_page(blk->mem.offset + blk->mem.size - 1);
    blk->pages = blk->last_page - blk->first_page + 1;
}
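/*
 * Illustrative example (assuming 4 KiB pages): for a block with
 * mem.offset == 0x1800 and mem.size == 0x2800, first_page == 1,
 * last_page == get_aligned_page(0x3fff) == 3, so pages == 3.
 */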

/*
 * search for an empty region on the PTB with the given size
 *
 * if an empty region is found, return its start page and store the following
 * mapped block in nextp
 * if none is found, return a negative error code.
 */
static int search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct list_head **nextp)
{
    int page = 1, found_page = -ENOMEM;
    int max_size = npages;
    int size;
    struct list_head *candidate = &emu->mapped_link_head;
    struct list_head *pos;

    list_for_each (pos, &emu->mapped_link_head) {
        struct snd_emu10k1_memblk *blk = get_emu10k1_memblk(pos, mapped_link);
        if (blk->mapped_page < 0)
            continue;
        size = blk->mapped_page - page;
        if (size == npages) {
            *nextp = pos;
            return page;
        }
        else if (size > max_size) {
            /* we look for the maximum empty hole */
            max_size = size;
            candidate = pos;
            found_page = page;
        }
        page = blk->mapped_page + blk->pages;
    }
    size = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0) - page;
    if (size >= max_size) {
        *nextp = pos;
        return page;
    }
    *nextp = candidate;
    return found_page;
}
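/*
 * Summary of the logic above: the scan starts at page 1 (page 0 is reserved
 * and never handed out).  An exactly fitting hole is returned immediately;
 * otherwise the free area behind the last mapped block is preferred when it
 * is at least as large as the best interior hole found (or the requested
 * size, if none was found); failing that, the largest interior hole wins,
 * and -ENOMEM is returned when nothing fits.
 */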

/*
 * map a memory block onto emu10k1's PTB
 *
 * call with memblk_lock held
 */
static int map_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
    int page, pg;
    struct list_head *next;

    page = search_empty_map_area(emu, blk->pages, &next);
    if (page < 0) /* not found */
        return page;
    if (page == 0) {
        dev_err(emu->card->dev, "trying to map zero (reserved) page\n");
        return -EINVAL;
    }
    /* insert this block at the proper position in the mapped list */
    list_add_tail(&blk->mapped_link, next);
    /* append this as the newest block to the order list */
    list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head);
    blk->mapped_page = page;
    /* fill PTB */
    for (pg = blk->first_page; pg <= blk->last_page; pg++) {
        set_ptb_entry(emu, page, emu->page_addr_table[pg]);
        page++;
    }
    return 0;
}

/*
 * unmap the block
 * return the size of the resulting empty region (in pages)
 *
 * call with memblk_lock held
 */
static int unmap_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
    int start_page, end_page, mpage, pg;
    struct list_head *p;
    struct snd_emu10k1_memblk *q;

    /* calculate the expected size of empty region */
    p = blk->mapped_link.prev;
    if (p != &emu->mapped_link_head) {
        q = get_emu10k1_memblk(p, mapped_link);
        start_page = q->mapped_page + q->pages;
    } else {
        start_page = 1;
    }
    p = blk->mapped_link.next;
    if (p != &emu->mapped_link_head) {
        q = get_emu10k1_memblk(p, mapped_link);
        end_page = q->mapped_page;
    } else {
        end_page = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0);
    }

    /* remove links */
    list_del(&blk->mapped_link);
    list_del(&blk->mapped_order_link);
    /* clear PTB */
    mpage = blk->mapped_page;
    for (pg = blk->first_page; pg <= blk->last_page; pg++) {
        set_silent_ptb(emu, mpage);
        mpage++;
    }
    blk->mapped_page = -1;
    return end_page - start_page; /* return the new empty size */
}

/*
 * search for empty pages large enough for the given size, and create a memory block
 *
 * unlike synth_alloc(), the memory block is aligned to the kernel page start
 */
static struct snd_emu10k1_memblk *
search_empty(struct snd_emu10k1 *emu, int size)
{
    struct list_head *p;
    struct snd_emu10k1_memblk *blk;
    int page, psize;

    psize = get_aligned_page(size + PAGE_SIZE - 1);
    page = 0;
    list_for_each(p, &emu->memhdr->block) {
        blk = get_emu10k1_memblk(p, mem.list);
        if (page + psize <= blk->first_page)
            goto __found_pages;
        page = blk->last_page + 1;
    }
    if (page + psize > emu->max_cache_pages)
        return NULL;

__found_pages:
    /* create a new memory block */
    blk = (struct snd_emu10k1_memblk *)__snd_util_memblk_new(emu->memhdr, psize << PAGE_SHIFT, p->prev);
    if (blk == NULL)
        return NULL;
    blk->mem.offset = aligned_page_offset(page); /* set aligned offset */
    emu10k1_memblk_init(blk);
    return blk;
}


/*
 * check if the given DMA address is valid for the page table
 */
static int is_valid_page(struct snd_emu10k1 *emu, dma_addr_t addr)
{
    if (addr & ~emu->dma_mask) {
        dev_err_ratelimited(emu->card->dev,
            "max memory size is 0x%lx (addr = 0x%lx)!!\n",
            emu->dma_mask, (unsigned long)addr);
        return 0;
    }
    if (addr & (EMUPAGESIZE-1)) {
        dev_err_ratelimited(emu->card->dev, "page is not aligned\n");
        return 0;
    }
    return 1;
}

/*
 * map the given memory block onto the PTB.
 * if the block is already mapped, only update the link order.
 * if no empty region is found, try to release unused memory blocks
 * and retry the mapping.
 */
int snd_emu10k1_memblk_map(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
    int err;
    int size;
    struct list_head *p, *nextp;
    struct snd_emu10k1_memblk *deleted;
    unsigned long flags;

    spin_lock_irqsave(&emu->memblk_lock, flags);
    if (blk->mapped_page >= 0) {
        /* update order link */
        list_move_tail(&blk->mapped_order_link,
                   &emu->mapped_order_link_head);
        spin_unlock_irqrestore(&emu->memblk_lock, flags);
        return 0;
    }
    err = map_memblk(emu, blk);
    if (err < 0) {
        /* not enough pages - try to unmap some blocks, */
        /* starting from the oldest block */
        p = emu->mapped_order_link_head.next;
        for (; p != &emu->mapped_order_link_head; p = nextp) {
            nextp = p->next;
            deleted = get_emu10k1_memblk(p, mapped_order_link);
            if (deleted->map_locked)
                continue;
            size = unmap_memblk(emu, deleted);
            if (size >= blk->pages) {
                /* ok, the empty region is now large enough */
                err = map_memblk(emu, blk);
                break;
            }
        }
    }
    spin_unlock_irqrestore(&emu->memblk_lock, flags);
    return err;
}

EXPORT_SYMBOL(snd_emu10k1_memblk_map);

/*
 * page allocation for DMA
 */
struct snd_util_memblk *
snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *substream)
{
    struct snd_pcm_runtime *runtime = substream->runtime;
    struct snd_util_memhdr *hdr;
    struct snd_emu10k1_memblk *blk;
    int page, err, idx;

    if (snd_BUG_ON(!emu))
        return NULL;
    if (snd_BUG_ON(runtime->dma_bytes <= 0 ||
               runtime->dma_bytes >= (emu->address_mode ? MAXPAGES1 : MAXPAGES0) * EMUPAGESIZE))
        return NULL;
    hdr = emu->memhdr;
    if (snd_BUG_ON(!hdr))
        return NULL;

    idx = runtime->period_size >= runtime->buffer_size ?
                    (emu->delay_pcm_irq * 2) : 0;
    mutex_lock(&hdr->block_mutex);
    blk = search_empty(emu, runtime->dma_bytes + idx);
    if (blk == NULL) {
        mutex_unlock(&hdr->block_mutex);
        return NULL;
    }
    /* fill in the buffer addresses; the kernel pointers are intentionally
     * left NULL so that __synth_free_pages() skips these pages when the
     * block is released
     */
    idx = 0;
    for (page = blk->first_page; page <= blk->last_page; page++, idx++) {
        unsigned long ofs = idx << PAGE_SHIFT;
        dma_addr_t addr;
        if (ofs >= runtime->dma_bytes)
            addr = emu->silent_page.addr;
        else
            addr = snd_pcm_sgbuf_get_addr(substream, ofs);
        if (! is_valid_page(emu, addr)) {
            dev_err_ratelimited(emu->card->dev,
                "emu: failure page = %d\n", idx);
            mutex_unlock(&hdr->block_mutex);
            return NULL;
        }
        emu->page_addr_table[page] = addr;
        emu->page_ptr_table[page] = NULL;
    }

    /* set PTB entries */
    blk->map_locked = 1; /* do not unmap this block! */
    err = snd_emu10k1_memblk_map(emu, blk);
    if (err < 0) {
        __snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
        mutex_unlock(&hdr->block_mutex);
        return NULL;
    }
    mutex_unlock(&hdr->block_mutex);
    return (struct snd_util_memblk *)blk;
}


/*
 * release DMA buffer from page table
 */
int snd_emu10k1_free_pages(struct snd_emu10k1 *emu, struct snd_util_memblk *blk)
{
    if (snd_BUG_ON(!emu || !blk))
        return -EINVAL;
    return snd_emu10k1_synth_free(emu, blk);
}

/*
 * allocate DMA pages, widening the allocation if necessary
 *
 * See the comment above snd_emu10k1_detect_iommu() in emu10k1_main.c for
 * why this might be needed.
 *
 * If you modify this function, check whether __synth_free_pages() also needs
 * changes.
 */
int snd_emu10k1_alloc_pages_maybe_wider(struct snd_emu10k1 *emu, size_t size,
                    struct snd_dma_buffer *dmab)
{
    if (emu->iommu_workaround) {
        size_t npages = DIV_ROUND_UP(size, PAGE_SIZE);
        size_t size_real = npages * PAGE_SIZE;

        /*
         * The device has been observed to access up to 256 extra
         * bytes, but use 1k to be safe.
         */
        if (size_real < size + 1024)
            size += PAGE_SIZE;
    }

    return snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
                   &emu->pci->dev, size, dmab);
}
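/*
 * Illustrative numbers (assuming 4 KiB kernel pages): a request of 3000 bytes
 * rounds up to one 4096-byte page, leaving 1096 bytes of slack, so the size
 * is left alone; a request of exactly 4096 bytes leaves no slack, so it is
 * widened to 8192 bytes when the IOMMU workaround is active.
 */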

/*
 * memory allocation using multiple pages (for synth)
 * Unlike the DMA allocation above, non-contiguous pages are assigned.
 */

/*
 * allocate a synth sample area
 */
struct snd_util_memblk *
snd_emu10k1_synth_alloc(struct snd_emu10k1 *hw, unsigned int size)
{
    struct snd_emu10k1_memblk *blk;
    struct snd_util_memhdr *hdr = hw->memhdr;

    mutex_lock(&hdr->block_mutex);
    blk = (struct snd_emu10k1_memblk *)__snd_util_mem_alloc(hdr, size);
    if (blk == NULL) {
        mutex_unlock(&hdr->block_mutex);
        return NULL;
    }
    if (synth_alloc_pages(hw, blk)) {
        __snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
        mutex_unlock(&hdr->block_mutex);
        return NULL;
    }
    snd_emu10k1_memblk_map(hw, blk);
    mutex_unlock(&hdr->block_mutex);
    return (struct snd_util_memblk *)blk;
}

EXPORT_SYMBOL(snd_emu10k1_synth_alloc);

/*
 * free a synth sample area
 */
int
snd_emu10k1_synth_free(struct snd_emu10k1 *emu, struct snd_util_memblk *memblk)
{
    struct snd_util_memhdr *hdr = emu->memhdr;
    struct snd_emu10k1_memblk *blk = (struct snd_emu10k1_memblk *)memblk;
    unsigned long flags;

    mutex_lock(&hdr->block_mutex);
    spin_lock_irqsave(&emu->memblk_lock, flags);
    if (blk->mapped_page >= 0)
        unmap_memblk(emu, blk);
    spin_unlock_irqrestore(&emu->memblk_lock, flags);
    synth_free_pages(emu, blk);
    __snd_util_mem_free(hdr, memblk);
    mutex_unlock(&hdr->block_mutex);
    return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_free);

/* determine the range of pages that actually need a new allocation */
static void get_single_page_range(struct snd_util_memhdr *hdr,
                  struct snd_emu10k1_memblk *blk,
                  int *first_page_ret, int *last_page_ret)
{
    struct list_head *p;
    struct snd_emu10k1_memblk *q;
    int first_page, last_page;
    first_page = blk->first_page;
    p = blk->mem.list.prev;
    if (p != &hdr->block) {
        q = get_emu10k1_memblk(p, mem.list);
        if (q->last_page == first_page)
            first_page++;  /* first page was already allocated */
    }
    last_page = blk->last_page;
    p = blk->mem.list.next;
    if (p != &hdr->block) {
        q = get_emu10k1_memblk(p, mem.list);
        if (q->first_page == last_page)
            last_page--; /* last page was already allocated */
    }
    *first_page_ret = first_page;
    *last_page_ret = last_page;
}
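/*
 * Example (purely illustrative page numbers): if the previous block in the
 * list ends on kernel page 5 and this block also starts on page 5, that page
 * already has backing memory, so the new allocation starts at page 6; the
 * same applies symmetrically to the last page.
 */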

/* release allocated pages */
static void __synth_free_pages(struct snd_emu10k1 *emu, int first_page,
                   int last_page)
{
    struct snd_dma_buffer dmab;
    int page;

    dmab.dev.type = SNDRV_DMA_TYPE_DEV;
    dmab.dev.dev = &emu->pci->dev;

    for (page = first_page; page <= last_page; page++) {
        if (emu->page_ptr_table[page] == NULL)
            continue;
        dmab.area = emu->page_ptr_table[page];
        dmab.addr = emu->page_addr_table[page];

        /*
         * please keep me in sync with logic in
         * snd_emu10k1_alloc_pages_maybe_wider()
         */
        dmab.bytes = PAGE_SIZE;
        if (emu->iommu_workaround)
            dmab.bytes *= 2;

        snd_dma_free_pages(&dmab);
        emu->page_addr_table[page] = 0;
        emu->page_ptr_table[page] = NULL;
    }
}

/*
 * allocate kernel pages
 */
static int synth_alloc_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
    int page, first_page, last_page;
    struct snd_dma_buffer dmab;

    emu10k1_memblk_init(blk);
    get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
    /* allocate kernel pages */
    for (page = first_page; page <= last_page; page++) {
        if (snd_emu10k1_alloc_pages_maybe_wider(emu, PAGE_SIZE,
                            &dmab) < 0)
            goto __fail;
        if (!is_valid_page(emu, dmab.addr)) {
            snd_dma_free_pages(&dmab);
            goto __fail;
        }
        emu->page_addr_table[page] = dmab.addr;
        emu->page_ptr_table[page] = dmab.area;
    }
    return 0;

__fail:
    /* release allocated pages */
    last_page = page - 1;
    __synth_free_pages(emu, first_page, last_page);

    return -ENOMEM;
}

/*
 * free pages
 */
static int synth_free_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
    int first_page, last_page;

    get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
    __synth_free_pages(emu, first_page, last_page);
    return 0;
}

/* calculate buffer pointer from offset address */
static inline void *offset_ptr(struct snd_emu10k1 *emu, int page, int offset)
{
    char *ptr;
    if (snd_BUG_ON(page < 0 || page >= emu->max_cache_pages))
        return NULL;
    ptr = emu->page_ptr_table[page];
    if (! ptr) {
        dev_err(emu->card->dev,
            "access to NULL ptr: page = %d\n", page);
        return NULL;
    }
    ptr += offset & (PAGE_SIZE - 1);
    return (void*)ptr;
}

/*
 * bzero(blk + offset, size)
 */
int snd_emu10k1_synth_bzero(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
                int offset, int size)
{
    int page, nextofs, end_offset, temp, temp1;
    void *ptr;
    struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

    offset += blk->offset & (PAGE_SIZE - 1);
    end_offset = offset + size;
    page = get_aligned_page(offset);
    do {
        nextofs = aligned_page_offset(page + 1);
        temp = nextofs - offset;
        temp1 = end_offset - offset;
        if (temp1 < temp)
            temp = temp1;
        ptr = offset_ptr(emu, page + p->first_page, offset);
        if (ptr)
            memset(ptr, 0, temp);
        offset = nextofs;
        page++;
    } while (offset < end_offset);
    return 0;
}
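/*
 * Illustrative walk-through (assuming 4 KiB pages): with an in-page start
 * offset of 0xf00 and size 0x300, the loop above clears 0x100 bytes at the
 * end of the first page and the remaining 0x200 bytes at the start of the
 * next page.  snd_emu10k1_synth_copy_from_user() below uses the same
 * per-page chunking.
 */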

EXPORT_SYMBOL(snd_emu10k1_synth_bzero);

/*
 * copy_from_user(blk + offset, data, size)
 */
int snd_emu10k1_synth_copy_from_user(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
                     int offset, const char __user *data, int size)
{
    int page, nextofs, end_offset, temp, temp1;
    void *ptr;
    struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

    offset += blk->offset & (PAGE_SIZE - 1);
    end_offset = offset + size;
    page = get_aligned_page(offset);
    do {
        nextofs = aligned_page_offset(page + 1);
        temp = nextofs - offset;
        temp1 = end_offset - offset;
        if (temp1 < temp)
            temp = temp1;
        ptr = offset_ptr(emu, page + p->first_page, offset);
        if (ptr && copy_from_user(ptr, data, temp))
            return -EFAULT;
        offset = nextofs;
        data += temp;
        page++;
    } while (offset < end_offset);
    return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_copy_from_user);