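/*
 * EMU10K1 memory page allocation (PTB area) and synth memory management.
 */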
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/time.h>
#include <linux/mutex.h>
#include <linux/export.h>

#include <sound/core.h>
#include <sound/emu10k1.h>
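/*
 * Page table (PTB) entry helpers.  The "page" argument of these two macros
 * is an EMU page (EMUPAGESIZE bytes), not an aligned kernel page as used
 * elsewhere in this file.  An entry holds the DMA address, shifted by the
 * chip's address mode, ORed with the entry index.
 */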
#define __set_ptb_entry(emu,page,addr) \
	(((__le32 *)(emu)->ptb_pages.area)[page] = \
	 cpu_to_le32(((addr) << (emu->address_mode)) | (page)))
#define __get_ptb_entry(emu, page) \
	(le32_to_cpu(((__le32 *)(emu)->ptb_pages.area)[page]))
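/*
 * When PAGE_SIZE is larger than EMUPAGESIZE, one aligned (kernel) page is
 * mapped as UNIT_PAGES consecutive EMU pages.
 */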
#define UNIT_PAGES		(PAGE_SIZE / EMUPAGESIZE)
#define MAX_ALIGN_PAGES0	(MAXPAGES0 / UNIT_PAGES)
#define MAX_ALIGN_PAGES1	(MAXPAGES1 / UNIT_PAGES)
/* get the aligned page index from a byte offset */
#define get_aligned_page(offset)	((offset) >> PAGE_SHIFT)

/* get the byte offset from an aligned page index */
#define aligned_page_offset(page)	((page) << PAGE_SHIFT)

#if PAGE_SIZE == EMUPAGESIZE && !IS_ENABLED(CONFIG_DYNAMIC_DEBUG)
/* fill the PTB entry corresponding to page with addr */
#define set_ptb_entry(emu,page,addr)	__set_ptb_entry(emu,page,addr)
/* fill the PTB entry corresponding to page with the silent page */
#define set_silent_ptb(emu,page)	__set_ptb_entry(emu,page,emu->silent_page.addr)
#else
/* fill PTB entries -- each aligned page covers UNIT_PAGES consecutive
 * EMU-page entries
 */
static inline void set_ptb_entry(struct snd_emu10k1 *emu, int page, dma_addr_t addr)
{
	int i;
	page *= UNIT_PAGES;
	for (i = 0; i < UNIT_PAGES; i++, page++) {
		__set_ptb_entry(emu, page, addr);
		dev_dbg(emu->card->dev, "mapped page %d to entry %.8x\n", page,
			(unsigned int)__get_ptb_entry(emu, page));
		addr += EMUPAGESIZE;
	}
}
static inline void set_silent_ptb(struct snd_emu10k1 *emu, int page)
{
	int i;
	page *= UNIT_PAGES;
	for (i = 0; i < UNIT_PAGES; i++, page++) {
		/* all entries point at the same silent page; addr is not advanced */
		__set_ptb_entry(emu, page, emu->silent_page.addr);
		dev_dbg(emu->card->dev, "mapped silent page %d to entry %.8x\n",
			page, (unsigned int)__get_ptb_entry(emu, page));
	}
}
#endif
static int synth_alloc_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);
static int synth_free_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);

#define get_emu10k1_memblk(l,member)	list_entry(l, struct snd_emu10k1_memblk, member)
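/*
 * Initialize the emu10k1-specific part of a memory block: compute the range
 * of aligned pages covered by the block.
 */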
static void emu10k1_memblk_init(struct snd_emu10k1_memblk *blk)
{
	blk->mapped_page = -1;
	INIT_LIST_HEAD(&blk->mapped_link);
	INIT_LIST_HEAD(&blk->mapped_order_link);
	blk->map_locked = 0;

	blk->first_page = get_aligned_page(blk->mem.offset);
	blk->last_page = get_aligned_page(blk->mem.offset + blk->mem.size - 1);
	blk->pages = blk->last_page - blk->first_page + 1;
}
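/*
 * Search the PTB for an empty area of at least npages mapped pages.
 * On success, return the first page index of the area and set *nextp to the
 * list position before which the new block should be inserted; otherwise
 * return a negative error code.
 */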
static int search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct list_head **nextp)
{
	int page = 1, found_page = -ENOMEM;
	int max_size = npages;
	int size;
	struct list_head *candidate = &emu->mapped_link_head;
	struct list_head *pos;

	list_for_each (pos, &emu->mapped_link_head) {
		struct snd_emu10k1_memblk *blk = get_emu10k1_memblk(pos, mapped_link);
		if (blk->mapped_page < 0)
			continue;
		size = blk->mapped_page - page;
		if (size == npages) {
			*nextp = pos;
			return page;
		}
		else if (size > max_size) {
			/* remember the largest hole seen so far */
			max_size = size;
			candidate = pos;
			found_page = page;
		}
		page = blk->mapped_page + blk->pages;
	}
	size = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0) - page;
	if (size >= max_size) {
		*nextp = pos;
		return page;
	}
	*nextp = candidate;
	return found_page;
}
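/*
 * Map a memory block onto the PTB.
 * The caller must hold emu->memblk_lock.
 */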
static int map_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int page, pg;
	struct list_head *next;

	page = search_empty_map_area(emu, blk->pages, &next);
	if (page < 0)
		return page;
	if (page == 0) {
		dev_err(emu->card->dev, "trying to map zero (reserved) page\n");
		return -EINVAL;
	}
	/* insert it into the sorted mapped list */
	list_add_tail(&blk->mapped_link, next);
	/* append to the mapped-order list used for eviction */
	list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head);
	blk->mapped_page = page;
	/* fill PTB */
	for (pg = blk->first_page; pg <= blk->last_page; pg++) {
		set_ptb_entry(emu, page, emu->page_addr_table[pg]);
		page++;
	}
	return 0;
}
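/*
 * Unmap the block from the PTB and return the size (in pages) of the empty
 * region that results around it.
 * The caller must hold emu->memblk_lock.
 */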
static int unmap_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int start_page, end_page, mpage, pg;
	struct list_head *p;
	struct snd_emu10k1_memblk *q;

	/* calculate the expected size of the resulting empty region */
	p = blk->mapped_link.prev;
	if (p != &emu->mapped_link_head) {
		q = get_emu10k1_memblk(p, mapped_link);
		start_page = q->mapped_page + q->pages;
	} else {
		start_page = 1;
	}
	p = blk->mapped_link.next;
	if (p != &emu->mapped_link_head) {
		q = get_emu10k1_memblk(p, mapped_link);
		end_page = q->mapped_page;
	} else {
		end_page = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0);
	}

	/* remove from the lists */
	list_del(&blk->mapped_link);
	list_del(&blk->mapped_order_link);
	/* point the PTB entries back at the silent page */
	mpage = blk->mapped_page;
	for (pg = blk->first_page; pg <= blk->last_page; pg++) {
		set_silent_ptb(emu, mpage);
		mpage++;
	}
	blk->mapped_page = -1;
	return end_page - start_page;
}
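/*
 * Search the block list for an unused, page-aligned area large enough for
 * size bytes and create a new memory block there.  Unlike the synth
 * allocations, the block always starts on an aligned page boundary.
 */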
static struct snd_emu10k1_memblk *
search_empty(struct snd_emu10k1 *emu, int size)
{
	struct list_head *p;
	struct snd_emu10k1_memblk *blk;
	int page, psize;

	psize = get_aligned_page(size + PAGE_SIZE - 1);
	page = 0;
	list_for_each(p, &emu->memhdr->block) {
		blk = get_emu10k1_memblk(p, mem.list);
		if (page + psize <= blk->first_page)
			goto __found_pages;
		page = blk->last_page + 1;
	}
	if (page + psize > emu->max_cache_pages)
		return NULL;

__found_pages:
	/* create a new memory block */
	blk = (struct snd_emu10k1_memblk *)__snd_util_memblk_new(emu->memhdr,
								 psize << PAGE_SHIFT,
								 p->prev);
	if (blk == NULL)
		return NULL;
	blk->mem.offset = aligned_page_offset(page);	/* set aligned offset */
	emu10k1_memblk_init(blk);
	return blk;
}
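/*
 * Check that a DMA address is usable as a page address: it must fit within
 * the chip's DMA mask and be aligned to EMUPAGESIZE.
 */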
static int is_valid_page(struct snd_emu10k1 *emu, dma_addr_t addr)
{
	if (addr & ~emu->dma_mask) {
		dev_err_ratelimited(emu->card->dev,
			"max memory size is 0x%lx (addr = 0x%lx)!!\n",
			emu->dma_mask, (unsigned long)addr);
		return 0;
	}
	if (addr & (EMUPAGESIZE-1)) {
		dev_err_ratelimited(emu->card->dev, "page is not aligned\n");
		return 0;
	}
	return 1;
}
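/*
 * Map the given memory block onto the PTB.
 * If the block is already mapped, only its position in the mapped-order list
 * is refreshed.  If no sufficiently large empty area is found, the oldest
 * unlocked blocks are unmapped until enough room is freed, and the mapping
 * is retried.
 */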
int snd_emu10k1_memblk_map(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int err;
	int size;
	struct list_head *p, *nextp;
	struct snd_emu10k1_memblk *deleted;
	unsigned long flags;

	spin_lock_irqsave(&emu->memblk_lock, flags);
	if (blk->mapped_page >= 0) {
		/* update order link */
		list_move_tail(&blk->mapped_order_link,
			       &emu->mapped_order_link_head);
		spin_unlock_irqrestore(&emu->memblk_lock, flags);
		return 0;
	}
	err = map_memblk(emu, blk);
	if (err < 0) {
		/* not enough room - try to unmap some blocks,
		 * starting from the oldest one
		 */
		p = emu->mapped_order_link_head.next;
		for (; p != &emu->mapped_order_link_head; p = nextp) {
			nextp = p->next;
			deleted = get_emu10k1_memblk(p, mapped_order_link);
			if (deleted->map_locked)
				continue;
			size = unmap_memblk(emu, deleted);
			if (size >= blk->pages) {
				/* the empty region is now large enough */
				err = map_memblk(emu, blk);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&emu->memblk_lock, flags);
	return err;
}

EXPORT_SYMBOL(snd_emu10k1_memblk_map);
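/*
 * Page allocation for the PCM DMA buffer: enter the (scatter-gather) buffer
 * pages of the substream into the page tables and map them onto the PTB.
 * The block is map-locked so that snd_emu10k1_memblk_map() never unmaps it
 * to make room for other blocks.
 */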
struct snd_util_memblk *
snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_util_memhdr *hdr;
	struct snd_emu10k1_memblk *blk;
	int page, err, idx;

	if (snd_BUG_ON(!emu))
		return NULL;
	if (snd_BUG_ON(runtime->dma_bytes <= 0 ||
		       runtime->dma_bytes >= (emu->address_mode ? MAXPAGES1 : MAXPAGES0) * EMUPAGESIZE))
		return NULL;
	hdr = emu->memhdr;
	if (snd_BUG_ON(!hdr))
		return NULL;

	idx = runtime->period_size >= runtime->buffer_size ?
					(emu->delay_pcm_irq * 2) : 0;
	mutex_lock(&hdr->block_mutex);
	blk = search_empty(emu, runtime->dma_bytes + idx);
	if (blk == NULL) {
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	/* fill in the buffer page addresses; the kernel pointers are left
	 * NULL so that synth_free_pages() never frees these pages
	 */
	idx = 0;
	for (page = blk->first_page; page <= blk->last_page; page++, idx++) {
		unsigned long ofs = idx << PAGE_SHIFT;
		dma_addr_t addr;
		if (ofs >= runtime->dma_bytes)
			addr = emu->silent_page.addr;
		else
			addr = snd_pcm_sgbuf_get_addr(substream, ofs);
		if (!is_valid_page(emu, addr)) {
			dev_err_ratelimited(emu->card->dev,
				"emu: failure page = %d\n", idx);
			mutex_unlock(&hdr->block_mutex);
			return NULL;
		}
		emu->page_addr_table[page] = addr;
		emu->page_ptr_table[page] = NULL;
	}

	/* set PTB entries */
	blk->map_locked = 1;	/* do not unmap this block! */
	err = snd_emu10k1_memblk_map(emu, blk);
	if (err < 0) {
		__snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	mutex_unlock(&hdr->block_mutex);
	return (struct snd_util_memblk *)blk;
}
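/*
 * Release the DMA buffer pages previously set up by snd_emu10k1_alloc_pages().
 */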
int snd_emu10k1_free_pages(struct snd_emu10k1 *emu, struct snd_util_memblk *blk)
{
	if (snd_BUG_ON(!emu || !blk))
		return -EINVAL;
	return snd_emu10k1_synth_free(emu, blk);
}
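/*
 * Allocate DMA pages, widening the allocation if necessary.
 *
 * With emu->iommu_workaround set, the allocation is padded so that at least
 * 1024 bytes of slack remain after the requested size.  For example, a
 * request of exactly PAGE_SIZE bytes rounds to one page with no slack, so a
 * second page is added and two pages are allocated.  The sizing must stay in
 * sync with the free path in __synth_free_pages().
 */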
int snd_emu10k1_alloc_pages_maybe_wider(struct snd_emu10k1 *emu, size_t size,
					struct snd_dma_buffer *dmab)
{
	if (emu->iommu_workaround) {
		size_t npages = DIV_ROUND_UP(size, PAGE_SIZE);
		size_t size_real = npages * PAGE_SIZE;

		/* widen the allocation if the rounded size leaves less than
		 * 1024 bytes of slack after the requested size
		 */
		if (size_real < size + 1024)
			size += PAGE_SIZE;
	}

	return snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
				   &emu->pci->dev, size, dmab);
}
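/*
 * Memory allocation for the wavetable synth.  Unlike the PCM DMA buffer
 * above, the pages backing a block need not be contiguous; each page is
 * allocated individually and entered into the page tables.
 *
 * snd_emu10k1_synth_alloc() allocates a synth sample area of the given size
 * in bytes.  A minimal usage sketch (illustrative only; "emu", "buf" and
 * "len" are placeholders, not identifiers from this file):
 *
 *	struct snd_util_memblk *blk;
 *
 *	blk = snd_emu10k1_synth_alloc(emu, len);
 *	if (!blk)
 *		return -ENOMEM;
 *	if (snd_emu10k1_synth_copy_from_user(emu, blk, 0, buf, len)) {
 *		snd_emu10k1_synth_free(emu, blk);
 *		return -EFAULT;
 *	}
 *	...
 *	snd_emu10k1_synth_free(emu, blk);
 */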
struct snd_util_memblk *
snd_emu10k1_synth_alloc(struct snd_emu10k1 *hw, unsigned int size)
{
	struct snd_emu10k1_memblk *blk;
	struct snd_util_memhdr *hdr = hw->memhdr;

	mutex_lock(&hdr->block_mutex);
	blk = (struct snd_emu10k1_memblk *)__snd_util_mem_alloc(hdr, size);
	if (blk == NULL) {
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	if (synth_alloc_pages(hw, blk)) {
		__snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	snd_emu10k1_memblk_map(hw, blk);
	mutex_unlock(&hdr->block_mutex);
	return (struct snd_util_memblk *)blk;
}

EXPORT_SYMBOL(snd_emu10k1_synth_alloc);
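/*
 * Free a synth sample area: unmap it from the PTB, release its pages and
 * free the memory block.
 */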
int
snd_emu10k1_synth_free(struct snd_emu10k1 *emu, struct snd_util_memblk *memblk)
{
	struct snd_util_memhdr *hdr = emu->memhdr;
	struct snd_emu10k1_memblk *blk = (struct snd_emu10k1_memblk *)memblk;
	unsigned long flags;

	mutex_lock(&hdr->block_mutex);
	spin_lock_irqsave(&emu->memblk_lock, flags);
	if (blk->mapped_page >= 0)
		unmap_memblk(emu, blk);
	spin_unlock_irqrestore(&emu->memblk_lock, flags);
	synth_free_pages(emu, blk);
	__snd_util_mem_free(hdr, memblk);
	mutex_unlock(&hdr->block_mutex);
	return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_free);
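/*
 * Calculate the range of pages owned exclusively by this block, excluding
 * the first/last page when it is shared with a neighbouring block.
 */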
static void get_single_page_range(struct snd_util_memhdr *hdr,
				  struct snd_emu10k1_memblk *blk,
				  int *first_page_ret, int *last_page_ret)
{
	struct list_head *p;
	struct snd_emu10k1_memblk *q;
	int first_page, last_page;

	first_page = blk->first_page;
	p = blk->mem.list.prev;
	if (p != &hdr->block) {
		q = get_emu10k1_memblk(p, mem.list);
		if (q->last_page == first_page)
			first_page++;
	}
	last_page = blk->last_page;
	p = blk->mem.list.next;
	if (p != &hdr->block) {
		q = get_emu10k1_memblk(p, mem.list);
		if (q->first_page == last_page)
			last_page--;
	}
	*first_page_ret = first_page;
	*last_page_ret = last_page;
}
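/*
 * Free the individually allocated pages in the range [first_page, last_page].
 */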
static void __synth_free_pages(struct snd_emu10k1 *emu, int first_page,
			       int last_page)
{
	struct snd_dma_buffer dmab;
	int page;

	dmab.dev.type = SNDRV_DMA_TYPE_DEV;
	dmab.dev.dev = &emu->pci->dev;

	for (page = first_page; page <= last_page; page++) {
		if (emu->page_ptr_table[page] == NULL)
			continue;
		dmab.area = emu->page_ptr_table[page];
		dmab.addr = emu->page_addr_table[page];

		/* the size must mirror the allocation in
		 * snd_emu10k1_alloc_pages_maybe_wider()
		 */
		dmab.bytes = PAGE_SIZE;
		if (emu->iommu_workaround)
			dmab.bytes *= 2;

		snd_dma_free_pages(&dmab);
		emu->page_addr_table[page] = 0;
		emu->page_ptr_table[page] = NULL;
	}
}
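/*
 * Allocate kernel pages for the block, one per aligned page that the block
 * owns exclusively, and record them in the page address/pointer tables.
 */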
static int synth_alloc_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int page, first_page, last_page;
	struct snd_dma_buffer dmab;

	emu10k1_memblk_init(blk);
	get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
	/* allocate a kernel page for each aligned page in the range */
	for (page = first_page; page <= last_page; page++) {
		if (snd_emu10k1_alloc_pages_maybe_wider(emu, PAGE_SIZE,
							&dmab) < 0)
			goto __fail;
		if (!is_valid_page(emu, dmab.addr)) {
			snd_dma_free_pages(&dmab);
			goto __fail;
		}
		emu->page_addr_table[page] = dmab.addr;
		emu->page_ptr_table[page] = dmab.area;
	}
	return 0;

__fail:
	/* release the pages already allocated */
	last_page = page - 1;
	__synth_free_pages(emu, first_page, last_page);

	return -ENOMEM;
}
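/*
 * Free the pages owned exclusively by the block.
 */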
static int synth_free_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int first_page, last_page;

	get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
	__synth_free_pages(emu, first_page, last_page);
	return 0;
}
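/*
 * Calculate the kernel virtual address for a byte offset within the block's
 * page range.
 */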
static inline void *offset_ptr(struct snd_emu10k1 *emu, int page, int offset)
{
	char *ptr;

	if (snd_BUG_ON(page < 0 || page >= emu->max_cache_pages))
		return NULL;
	ptr = emu->page_ptr_table[page];
	if (!ptr) {
		dev_err(emu->card->dev,
			"access to NULL ptr: page = %d\n", page);
		return NULL;
	}
	ptr += offset & (PAGE_SIZE - 1);
	return (void *)ptr;
}
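/*
 * bzero(blk + offset, size): clear a region of a synth memory block,
 * walking it page by page.
 */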
int snd_emu10k1_synth_bzero(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
			    int offset, int size)
{
	int page, nextofs, end_offset, temp, temp1;
	void *ptr;
	struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

	offset += blk->offset & (PAGE_SIZE - 1);
	end_offset = offset + size;
	page = get_aligned_page(offset);
	do {
		nextofs = aligned_page_offset(page + 1);
		temp = nextofs - offset;
		temp1 = end_offset - offset;
		if (temp1 < temp)
			temp = temp1;
		ptr = offset_ptr(emu, page + p->first_page, offset);
		if (ptr)
			memset(ptr, 0, temp);
		offset = nextofs;
		page++;
	} while (offset < end_offset);
	return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_bzero);
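/*
 * copy_from_user(blk + offset, data, size): copy sample data from user space
 * into a synth memory block, walking it page by page.
 */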
int snd_emu10k1_synth_copy_from_user(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
				     int offset, const char __user *data, int size)
{
	int page, nextofs, end_offset, temp, temp1;
	void *ptr;
	struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

	offset += blk->offset & (PAGE_SIZE - 1);
	end_offset = offset + size;
	page = get_aligned_page(offset);
	do {
		nextofs = aligned_page_offset(page + 1);
		temp = nextofs - offset;
		temp1 = end_offset - offset;
		if (temp1 < temp)
			temp = temp1;
		ptr = offset_ptr(emu, page + p->first_page, offset);
		if (ptr && copy_from_user(ptr, data, temp))
			return -EFAULT;
		offset = nextofs;
		data += temp;
		page++;
	} while (offset < end_offset);
	return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_copy_from_user);