/**
 * Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved.
 *
 * This source file is released under GPL v2 license (no other versions).
 * See the COPYING file included in the main directory of this source
 * distribution for the license terms and conditions.
 *
 * @File	ctvmem.c
 *
 * @Brief
 * This file contains the implementation of the virtual memory management
 * object for the card device.
 *
 * @Author	Liu Chun
 * @Date	Apr 1 2008
 */
#include "ctvmem.h"
#include "ctatc.h"
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <sound/pcm.h>

#define CT_PTES_PER_PAGE (CT_PAGE_SIZE / sizeof(void *))
#define CT_ADDRS_PER_PAGE (CT_PTES_PER_PAGE * CT_PAGE_SIZE)
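
/*
 * Sizing sketch (illustrative, assuming a 4 KiB CT_PAGE_SIZE and 8-byte
 * pointers as on a 64-bit host): one page table page then holds
 * 4096 / 8 = 512 PTEs, so it can map 512 * 4096 bytes = 2 MiB of
 * device-visible address space. A 32-bit host with 4-byte pointers
 * holds 1024 PTEs per page and maps 4 MiB.
 */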

/*
 * Find or create a vm block based on the requested @size.
 * @size must be page aligned.
 */
static struct ct_vm_block *
get_vm_block(struct ct_vm *vm, unsigned int size, struct ct_atc *atc)
{
	struct ct_vm_block *block = NULL, *entry;
	struct list_head *pos;

	size = CT_PAGE_ALIGN(size);
	if (size > vm->size) {
		dev_err(atc->card->dev,
			"Insufficient device virtual memory space available!\n");
		return NULL;
	}

	mutex_lock(&vm->lock);
	list_for_each(pos, &vm->unused) {
		entry = list_entry(pos, struct ct_vm_block, list);
		if (entry->size >= size)
			break; /* found a free block that is big enough */
	}
	if (pos == &vm->unused)
		goto out;

	if (entry->size == size) {
		/* Move the vm node from unused list to used list directly */
		list_move(&entry->list, &vm->used);
		vm->size -= size;
		block = entry;
		goto out;
	}

	/* Split the front of the free block off for this allocation */
	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block)
		goto out;

	block->addr = entry->addr;
	block->size = size;
	list_add(&block->list, &vm->used);
	entry->addr += size;
	entry->size -= size;
	vm->size -= size;

out:
	mutex_unlock(&vm->lock);
	return block;
}
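
/*
 * Illustrative example for get_vm_block() (hypothetical numbers): with
 * one free block [addr 0x0000, size 16 pages], a 4-page request splits
 * it into a used block [0x0000, 4 pages] and a shrunken free block
 * starting 4 pages in with 12 pages left; vm->size drops by the 4
 * allocated pages. An exact-fit request instead moves the whole node
 * onto the used list without allocating a new node.
 */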

/*
 * Return a no-longer-used vm block to the sorted free list, merging it
 * with any adjacent free blocks.
 */
static void put_vm_block(struct ct_vm *vm, struct ct_vm_block *block)
{
	struct ct_vm_block *entry, *pre_ent;
	struct list_head *pos, *pre;

	block->size = CT_PAGE_ALIGN(block->size);

	mutex_lock(&vm->lock);
	list_del(&block->list);
	vm->size += block->size;

	list_for_each(pos, &vm->unused) {
		entry = list_entry(pos, struct ct_vm_block, list);
		if (entry->addr >= (block->addr + block->size))
			break; /* found the insertion position */
	}
	if (pos == &vm->unused) {
		list_add_tail(&block->list, &vm->unused);
		entry = block;
	} else {
		if ((block->addr + block->size) == entry->addr) {
			/* Merge with the following free block */
			entry->addr = block->addr;
			entry->size += block->size;
			kfree(block);
		} else {
			__list_add(&block->list, pos->prev, pos);
			entry = block;
		}
	}
	/* Merge with the preceding free block while it is contiguous */
	pos = &entry->list;
	pre = pos->prev;
	while (pre != &vm->unused) {
		entry = list_entry(pos, struct ct_vm_block, list);
		pre_ent = list_entry(pre, struct ct_vm_block, list);
		if ((pre_ent->addr + pre_ent->size) != entry->addr)
			break; /* not contiguous; stop merging */

		pre_ent->size += entry->size;
		list_del(pos);
		kfree(entry);
		pos = pre;
		pre = pos->prev;
	}
	mutex_unlock(&vm->lock);
}
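
/*
 * Illustrative example for put_vm_block() (hypothetical numbers):
 * freeing a 4-page block at 0x4000 while free blocks [0x0000, 4 pages]
 * and [0x8000, 4 pages] already exist coalesces all three into a single
 * free block [0x0000, 12 pages] -- first by merging forward with the
 * block at 0x8000, then backward with the block ending at 0x4000.
 */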

/* Map host addr (kmalloced/vmalloced) to device logical addr. */
static struct ct_vm_block *
ct_vm_map(struct ct_vm *vm, struct snd_pcm_substream *substream, int size)
{
	struct ct_vm_block *block;
	unsigned int pte_start;
	unsigned int i, pages;
	unsigned long *ptp;
	struct ct_atc *atc = snd_pcm_substream_chip(substream);

	block = get_vm_block(vm, size, atc);
	if (block == NULL) {
		dev_err(atc->card->dev,
			"No device virtual memory block big enough to allocate!\n");
		return NULL;
	}

	/* Fill the PTEs covering this block with the physical addresses
	 * of the buffer's pages */
	ptp = (unsigned long *)vm->ptp[0].area;
	pte_start = (block->addr >> CT_PAGE_SHIFT);
	pages = block->size >> CT_PAGE_SHIFT;
	for (i = 0; i < pages; i++) {
		unsigned long addr;

		addr = snd_pcm_sgbuf_get_addr(substream, i << CT_PAGE_SHIFT);
		ptp[pte_start + i] = addr;
	}

	/* Remember the requested (unaligned) size for the caller */
	block->size = size;
	return block;
}
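
/*
 * Caller sketch (for illustration only; the actual call sites live in
 * ctatc.c): the ATC layer maps a PCM substream's buffer roughly like
 *
 *	block = atc->vm->map(atc->vm, substream, runtime->dma_bytes);
 *	if (!block)
 *		return -ENOENT;
 *
 * and later releases the mapping with atc->vm->unmap(atc->vm, block).
 */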

static void ct_vm_unmap(struct ct_vm *vm, struct ct_vm_block *block)
{
	/* do unmapping */
	put_vm_block(vm, block);
}

/*
 * Return the host physical addr of the @index-th device page table
 * page on success, or ~0UL on failure.
 * The first returned ~0UL indicates the termination.
 */
static dma_addr_t
ct_get_ptp_phys(struct ct_vm *vm, int index)
{
	return (index >= CT_PTP_NUM) ? ~0UL : vm->ptp[index].addr;
}

/*
 * Create the device virtual memory manager and its page table pages.
 * Must be called before any mapping is attempted.
 */
int ct_vm_create(struct ct_vm **rvm, struct pci_dev *pci)
{
	struct ct_vm *vm;
	struct ct_vm_block *block;
	int i, err = 0;

	*rvm = NULL;

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return -ENOMEM;

	mutex_init(&vm->lock);

	/* Allocate page table pages */
	for (i = 0; i < CT_PTP_NUM; i++) {
		err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
					  &pci->dev,
					  PAGE_SIZE, &vm->ptp[i]);
		if (err < 0)
			break;
	}
	if (err < 0) {
		/* no page table pages were allocated */
		ct_vm_destroy(vm);
		return -ENOMEM;
	}
	vm->size = CT_ADDRS_PER_PAGE * i;
	vm->map = ct_vm_map;
	vm->unmap = ct_vm_unmap;
	vm->get_ptp_phys = ct_get_ptp_phys;
	INIT_LIST_HEAD(&vm->unused);
	INIT_LIST_HEAD(&vm->used);
	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (block) {
		/* the whole address space starts out as one free block */
		block->addr = 0;
		block->size = vm->size;
		list_add(&block->list, &vm->unused);
	}

	*rvm = vm;
	return 0;
}
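
/*
 * Lifecycle sketch (for illustration only; see ctatc.c for the real
 * call sites): the driver typically creates the vm once per card during
 * probe, roughly like
 *
 *	err = ct_vm_create(&atc->vm, atc->pci);
 *
 * and tears it down with ct_vm_destroy(atc->vm) after ensuring that no
 * mapping pages are still in use by the hardware.
 */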

/* The caller must ensure that no mapping pages are being used
 * by the hardware before calling this function */
void ct_vm_destroy(struct ct_vm *vm)
{
	int i;
	struct list_head *pos;
	struct ct_vm_block *entry;

	/* free used and unused list nodes */
	while (!list_empty(&vm->used)) {
		pos = vm->used.next;
		list_del(pos);
		entry = list_entry(pos, struct ct_vm_block, list);
		kfree(entry);
	}
	while (!list_empty(&vm->unused)) {
		pos = vm->unused.next;
		list_del(pos);
		entry = list_entry(pos, struct ct_vm_block, list);
		kfree(entry);
	}

	/* free allocated page table pages */
	for (i = 0; i < CT_PTP_NUM; i++)
		snd_dma_free_pages(&vm->ptp[i]);

	vm->size = 0;

	kfree(vm);
}