/*
 * Legacy DRM map and DMA buffer management.
 *
 * Implements the map handling (addmap/getmap/rmmap) and DMA buffer
 * handling (addbufs/infobufs/markbufs/freebufs/mapbufs) ioctls used by
 * legacy, pre-KMS DRM drivers.
 */

#include <linux/export.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/nospec.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

#include <asm/shmparam.h>

#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_print.h>

#include "drm_legacy.h"

static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
						  struct drm_local_map *map)
{
	struct drm_map_list *entry;

	list_for_each_entry(entry, &dev->maplist, head) {
		/*
		 * Skip entries that cannot match: missing map, different
		 * map type, or owned by a different master.  For SHM maps,
		 * only the one carrying the lock is considered a match;
		 * register and framebuffer maps are compared on the low
		 * 32 bits of their offset, since user tokens are 32-bit.
		 */
		if (!entry->map ||
		    map->type != entry->map->type ||
		    entry->master != dev->master)
			continue;
		switch (map->type) {
		case _DRM_SHM:
			if (map->flags != _DRM_CONTAINS_LOCK)
				break;
			return entry;
		case _DRM_REGISTERS:
		case _DRM_FRAME_BUFFER:
			if ((entry->map->offset & 0xffffffff) ==
			    (map->offset & 0xffffffff))
				return entry;
			break;
		default:
			break;
		}
		if (entry->map->offset == map->offset)
			return entry;
	}

	return NULL;
}

static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
			  unsigned long user_token, int hashed_handle, int shm)
{
	int use_hashed_handle, shift;
	unsigned long add;

#if (BITS_PER_LONG == 64)
	use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
#elif (BITS_PER_LONG == 32)
	use_hashed_handle = hashed_handle;
#else
#error Unsupported long size. Neither 64 nor 32 bits.
#endif

	if (!use_hashed_handle) {
		int ret;

		hash->key = user_token >> PAGE_SHIFT;
		ret = drm_ht_insert_item(&dev->map_hash, hash);
		if (ret != -EINVAL)
			return ret;
	}

	shift = 0;
	add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
	if (shm && (SHMLBA > PAGE_SIZE)) {
		int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;

		/*
		 * For shared memory, we have to preserve the SHMLBA
		 * bits of the eventual vma->vm_pgoff value during
		 * mmap().  Otherwise we run into cache aliasing problems
		 * on some platforms.  On these platforms, the pgoff of
		 * a shared shm segment must share its low SHMLBA bits
		 * with the virtual address it gets mapped at, so the
		 * corresponding bits of the user token are folded into
		 * "add" and the hash search below only varies the bits
		 * above them.
		 */
		shift = bits;
		add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
	}

	return drm_ht_just_insert_please(&dev->map_hash, hash,
					 user_token, 32 - PAGE_SHIFT - 3,
					 shift, add);
}

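/*
 * Worked example of the SHMLBA handling above, with assumed values for
 * illustration only: with PAGE_SIZE = 4 KiB and SHMLBA = 16 KiB,
 * bits = ilog2(16K >> 12) + 1 = 3, so the three lowest page-offset bits
 * of the user token are folded into "add".  drm_ht_just_insert_please()
 * then only generates candidate keys whose low "shift" bits equal "add",
 * i.e. every candidate token preserves the caller's cache colour.
 */
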
/*
 * Core function to add a map to a device.
 *
 * Validates the requested mapping, performs the type-specific setup
 * (ioremap for registers, vmalloc for SHM, offset fixups for AGP and
 * scatter/gather, dma_alloc_coherent for consistent memory), links the
 * map into dev->maplist and assigns a 32-bit user token for it.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int drm_addmap_core(struct drm_device *dev, resource_size_t offset,
			   unsigned int size, enum drm_map_type type,
			   enum drm_map_flags flags,
			   struct drm_map_list **maplist)
{
	struct drm_local_map *map;
	struct drm_map_list *list;
	unsigned long user_token;
	int ret;

	map = kmalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	map->offset = offset;
	map->size = size;
	map->flags = flags;
	map->type = type;

	/* Only allow shared memory to be removable since we only keep
	 * enough book keeping information about shared memory to allow
	 * for removal when processes fork.
	 */
	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
		kfree(map);
		return -EINVAL;
	}
	DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
		  (unsigned long long)map->offset, map->size, map->type);

	/* page-align _DRM_SHM maps.  They are allocated here so there is no
	 * security hole created by that, and it works around various broken
	 * drivers that use a non-aligned quantity to map the SAREA.
	 */
	if (map->type == _DRM_SHM)
		map->size = PAGE_ALIGN(map->size);

	if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
		kfree(map);
		return -EINVAL;
	}
	map->mtrr = -1;
	map->handle = NULL;

	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
		if (map->offset + (map->size - 1) < map->offset ||
		    map->offset < virt_to_phys(high_memory)) {
			kfree(map);
			return -EINVAL;
		}
#endif
		/* Some drivers preinitialize some maps, without the X Server
		 * needing to be aware of it.  Therefore, we just return success
		 * when the server tries to create a duplicate map.
		 */
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size,
					  list->map->size);
				list->map->size = map->size;
			}

			kfree(map);
			*maplist = list;
			return 0;
		}

		if (map->type == _DRM_FRAME_BUFFER ||
		    (map->flags & _DRM_WRITE_COMBINING)) {
			map->mtrr =
				arch_phys_wc_add(map->offset, map->size);
		}
		if (map->type == _DRM_REGISTERS) {
			if (map->flags & _DRM_WRITE_COMBINING)
				map->handle = ioremap_wc(map->offset,
							 map->size);
			else
				map->handle = ioremap(map->offset, map->size);
			if (!map->handle) {
				kfree(map);
				return -ENOMEM;
			}
		}

		break;
	case _DRM_SHM:
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size, list->map->size);
				list->map->size = map->size;
			}

			kfree(map);
			*maplist = list;
			return 0;
		}
		map->handle = vmalloc_user(map->size);
		DRM_DEBUG("%lu %d %p\n",
			  map->size, order_base_2(map->size), map->handle);
		if (!map->handle) {
			kfree(map);
			return -ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->master->lock.hw_lock != NULL) {
				vfree(map->handle);
				kfree(map);
				return -EBUSY;
			}
			dev->sigdata.lock = dev->master->lock.hw_lock = map->handle;
		}
		break;
	case _DRM_AGP: {
		struct drm_agp_mem *entry;
		int valid = 0;

		if (!dev->agp) {
			kfree(map);
			return -EINVAL;
		}
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		/* In some cases (i810 driver), user space may have already
		 * added the AGP base itself, because dev->agp->base previously
		 * was zero.  If the offset does not fall inside the aperture,
		 * assume it is aperture-relative and add the base.
		 */
		if (map->offset < dev->agp->base ||
		    map->offset > dev->agp->base +
		    dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
			map->offset += dev->agp->base;
		}
		map->mtrr = dev->agp->agp_mtrr;

		/* This assumes the DRM is in total control of AGP space.
		 * It's not always the case as AGP can be in the control
		 * of user space (i.e. i810 driver). So this loop will get
		 * skipped and we double check that dev->agp->memory is
		 * NULL only if the map wasn't already bound.
		 */
		list_for_each_entry(entry, &dev->agp->memory, head) {
			if ((map->offset >= entry->bound) &&
			    (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
				valid = 1;
				break;
			}
		}
		if (!list_empty(&dev->agp->memory) && !valid) {
			kfree(map);
			return -EPERM;
		}
		DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
			  (unsigned long long)map->offset, map->size);

		break;
	}
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			kfree(map);
			return -EINVAL;
		}
		map->offset += (unsigned long)dev->sg->virtual;
		break;
	case _DRM_CONSISTENT:
		/* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G.
		 * As we're limiting the address to 2^32-1 (or less),
		 * casting it down to 32 bits is no problem, but we
		 * need to point to a 64bit variable first.
		 */
		map->handle = dma_alloc_coherent(dev->dev,
						 map->size,
						 &map->offset,
						 GFP_KERNEL);
		if (!map->handle) {
			kfree(map);
			return -ENOMEM;
		}
		break;
	default:
		kfree(map);
		return -EINVAL;
	}

	list = kzalloc(sizeof(*list), GFP_KERNEL);
	if (!list) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		kfree(map);
		return -ENOMEM;
	}
	list->map = map;

	mutex_lock(&dev->struct_mutex);
	list_add(&list->head, &dev->maplist);

	/* Assign a 32-bit handle */
	/* We do it here so that dev->struct_mutex protects the increment */
	user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
		map->offset;
	ret = drm_map_handle(dev, &list->hash, user_token, 0,
			     (map->type == _DRM_SHM));
	if (ret) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		kfree(map);
		kfree(list);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	list->user_token = list->hash.key << PAGE_SHIFT;
	mutex_unlock(&dev->struct_mutex);

	if (!(map->flags & _DRM_DRIVER))
		list->master = dev->master;
	*maplist = list;
	return 0;
}

int drm_legacy_addmap(struct drm_device *dev, resource_size_t offset,
		      unsigned int size, enum drm_map_type type,
		      enum drm_map_flags flags, struct drm_local_map **map_ptr)
{
	struct drm_map_list *list;
	int rc;

	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
	if (!rc)
		*map_ptr = list->map;
	return rc;
}
EXPORT_SYMBOL(drm_legacy_addmap);
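
/*
 * Usage sketch for drm_legacy_addmap() from a hypothetical legacy
 * driver's load hook (the pci_resource_* values are assumptions for
 * illustration, not taken from this file):
 *
 *	struct drm_local_map *map;
 *	int err;
 *
 *	err = drm_legacy_addmap(dev, pci_resource_start(pdev, 0),
 *				pci_resource_len(pdev, 0),
 *				_DRM_REGISTERS, _DRM_WRITE_COMBINING, &map);
 *	if (err)
 *		return err;
 *
 * The map is torn down with drm_legacy_rmmap() or on final close.
 */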

struct drm_local_map *drm_legacy_findmap(struct drm_device *dev,
					 unsigned int token)
{
	struct drm_map_list *_entry;

	list_for_each_entry(_entry, &dev->maplist, head)
		if (_entry->user_token == token)
			return _entry->map;
	return NULL;
}
EXPORT_SYMBOL(drm_legacy_findmap);
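
/*
 * Example (hypothetical): a driver translating the 32-bit user token of
 * a map back into the map itself, e.g. inside a custom ioctl:
 *
 *	struct drm_local_map *map = drm_legacy_findmap(dev, token);
 *
 *	if (!map)
 *		return -EINVAL;
 */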

/*
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param dev DRM device.
 * \param data pointer to a struct drm_map structure.
 * \param file_priv DRM file private.
 * \return zero on success or a negative value on error.
 */
int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_map *map = data;
	struct drm_map_list *maplist;
	int err;

	if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
		return -EPERM;

	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
	    !drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	err = drm_addmap_core(dev, map->offset, map->size, map->type,
			      map->flags, &maplist);

	if (err)
		return err;

	/* avoid a warning on 64-bit: this casting isn't very nice, but the
	 * return value is just used as a cookie by userspace, so nothing
	 * is lost.
	 */
	map->handle = (void *)(unsigned long)maplist->user_token;

	/*
	 * It appears that there are no users of this value whatsoever --
	 * drmAddMap just discards it.  Let's not encourage its use.
	 * (Keeping drm_addmap_core's returned mtrr value would be wrong --
	 *  it's not a real mtrr index anymore.)
	 */
	map->mtrr = -1;

	return 0;
}
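
/*
 * Userspace reaches this through DRM_IOCTL_ADD_MAP; a minimal sketch
 * using libdrm's wrapper (the offset and size are illustrative
 * assumptions):
 *
 *	drm_handle_t handle;
 *
 *	if (drmAddMap(fd, offset, size, DRM_REGISTERS, 0, &handle))
 *		return -errno;
 *
 * The returned handle is the 32-bit token assigned by drm_map_handle()
 * and is later passed to mmap() as the file offset.
 */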

/*
 * Get mapping information by index (ioctl).
 *
 * \param dev DRM device.
 * \param data pointer to a struct drm_map structure.
 * \param file_priv DRM file private.
 * \return zero on success or a negative number on failure.
 *
 * Looks up the idx-th entry of drm_device::maplist (the request's offset
 * field is used as the list index) and copies its information into
 * userspace.
 */
int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_map *map = data;
	struct drm_map_list *r_list = NULL;
	struct list_head *list;
	int idx;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
	    !drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	idx = map->offset;
	if (idx < 0)
		return -EINVAL;

	i = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each(list, &dev->maplist) {
		if (i == idx) {
			r_list = list_entry(list, struct drm_map_list, head);
			break;
		}
		i++;
	}
	if (!r_list || !r_list->map) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	map->offset = r_list->map->offset;
	map->size = r_list->map->size;
	map->type = r_list->map->type;
	map->flags = r_list->map->flags;
	map->handle = (void *)(unsigned long) r_list->user_token;
	map->mtrr = arch_phys_wc_index(r_list->map->mtrr);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

/*
 * Remove a map private from list and deallocate resources if the mapping
 * isn't in use.
 *
 * Searches the map on drm_device::maplist, removes it from the list, and
 * frees any associated resource (such as MTRR's) if it's not being used.
 * The caller must hold dev->struct_mutex.
 *
 * \sa drm_legacy_addmap
 */
int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
{
	struct drm_map_list *r_list = NULL, *list_t;
	int found = 0;
	struct drm_master *master;

	/* Find the list entry for the map and remove it */
	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
		if (r_list->map == map) {
			master = r_list->master;
			list_del(&r_list->head);
			drm_ht_remove_key(&dev->map_hash,
					  r_list->user_token >> PAGE_SHIFT);
			kfree(r_list);
			found = 1;
			break;
		}
	}

	if (!found)
		return -EINVAL;

	switch (map->type) {
	case _DRM_REGISTERS:
		iounmap(map->handle);
		fallthrough;
	case _DRM_FRAME_BUFFER:
		arch_phys_wc_del(map->mtrr);
		break;
	case _DRM_SHM:
		vfree(map->handle);
		if (master) {
			if (dev->sigdata.lock == master->lock.hw_lock)
				dev->sigdata.lock = NULL;
			master->lock.hw_lock = NULL;
			master->lock.file_priv = NULL;
			wake_up_interruptible_all(&master->lock.lock_queue);
		}
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		dma_free_coherent(dev->dev,
				  map->size,
				  map->handle,
				  map->offset);
		break;
	}
	kfree(map);

	return 0;
}
EXPORT_SYMBOL(drm_legacy_rmmap_locked);

void drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
{
	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
	    !drm_core_check_feature(dev, DRIVER_LEGACY))
		return;

	mutex_lock(&dev->struct_mutex);
	drm_legacy_rmmap_locked(dev, map);
	mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_legacy_rmmap);
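
/*
 * Usage sketch (hypothetical): tearing down the map created in the
 * drm_legacy_addmap() example above, e.g. from a driver unload hook:
 *
 *	drm_legacy_rmmap(dev, map);
 *
 * drm_legacy_rmmap_locked() is the variant for callers that already
 * hold dev->struct_mutex.
 */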

void drm_legacy_master_rmmaps(struct drm_device *dev, struct drm_master *master)
{
	struct drm_map_list *r_list, *list_temp;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
		if (r_list->master == master) {
			drm_legacy_rmmap_locked(dev, r_list->map);
			r_list = NULL;
		}
	}
	mutex_unlock(&dev->struct_mutex);
}

void drm_legacy_rmmaps(struct drm_device *dev)
{
	struct drm_map_list *r_list, *list_temp;

	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
		drm_legacy_rmmap(dev, r_list->map);
}

/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly.  Therefore, having userland manually remove mappings seems
 * to be at best unnecessary.
 *
 * Only maps carrying the _DRM_REMOVABLE flag can be removed this way;
 * register and framebuffer maps are treated as permanent and the ioctl
 * silently succeeds for them without removing anything.
 */
int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_map *request = data;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
	    !drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(r_list, &dev->maplist, head) {
		if (r_list->map &&
		    r_list->user_token == (unsigned long)request->handle &&
		    r_list->map->flags & _DRM_REMOVABLE) {
			map = r_list->map;
			break;
		}
	}

	/* List has wrapped around to the head pointer, or it's empty and we
	 * didn't find anything.
	 */
	if (list_empty(&dev->maplist) || !map) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	/* Register and framebuffer maps are permanent */
	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	ret = drm_legacy_rmmap_locked(dev, map);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/*
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(struct drm_device *dev,
				  struct drm_buf_entry *entry)
{
	drm_dma_handle_t *dmah;
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				dmah = entry->seglist[i];
				dma_free_coherent(dev->dev,
						  dmah->size,
						  dmah->vaddr,
						  dmah->busaddr);
				kfree(dmah);
			}
		}
		kfree(entry->seglist);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			kfree(entry->buflist[i].dev_private);
		}
		kfree(entry->buflist);

		entry->buf_count = 0;
	}
}

#if IS_ENABLED(CONFIG_AGP)
/*
 * Add AGP buffers for DMA transfers.
 *
 * \param dev struct drm_device to which the buffers are to be added.
 * \param request pointer to a struct drm_buf_desc describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
int drm_legacy_addbufs_agp(struct drm_device *dev,
			   struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_agp_mem *agp_entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i, valid;
	struct drm_buf **temp_buflist;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lx\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	/* Make sure buffers are located in AGP memory that we own */
	valid = 0;
	list_for_each_entry(agp_entry, &dev->agp->memory, head) {
		if ((agp_offset >= agp_entry->bound) &&
		    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
			valid = 1;
			break;
		}
	}
	if (!list_empty(&dev->agp->memory) && !valid) {
		DRM_DEBUG("zone invalid\n");
		return -EINVAL;
	}
	spin_lock(&dev->buf_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->buf_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_legacy_addbufs_agp);
#endif /* CONFIG_AGP */

int drm_legacy_addbufs_pci(struct drm_device *dev,
			   struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	struct drm_buf_entry *entry;
	drm_dma_handle_t *dmah;
	struct drm_buf *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
		return -EOPNOTSUPP;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
		  request->count, request->size, size, order);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	spin_lock(&dev->buf_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->buf_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->seglist = kcalloc(count, sizeof(*entry->seglist), GFP_KERNEL);
	if (!entry->seglist) {
		kfree(entry->buflist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded.
	 */
	temp_pagelist = kmalloc_array(dma->page_count + (count << page_order),
				      sizeof(*dma->pagelist),
				      GFP_KERNEL);
	if (!temp_pagelist) {
		kfree(entry->buflist);
		kfree(entry->seglist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memcpy(temp_pagelist,
	       dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
	DRM_DEBUG("pagelist: %d entries\n",
		  dma->page_count + (count << page_order));

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while (entry->buf_count < count) {
		dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
		if (!dmah) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			kfree(temp_pagelist);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		dmah->size = total;
		dmah->vaddr = dma_alloc_coherent(dev->dev,
						 dmah->size,
						 &dmah->busaddr,
						 GFP_KERNEL);
		if (!dmah->vaddr) {
			kfree(dmah);

			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			kfree(temp_pagelist);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		entry->seglist[entry->seg_count++] = dmah;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ 0x%08lx\n",
				  dma->page_count + page_count,
				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++]
				= (unsigned long)dmah->vaddr + PAGE_SIZE * i;
		}
		for (offset = 0;
		     offset + size <= total && entry->buf_count < count;
		     offset += alignment, ++entry->buf_count) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = (void *)(dmah->vaddr + offset);
			buf->bus_address = dmah->busaddr + offset;
			buf->next = NULL;
			buf->waiting = 0;
			buf->pending = 0;
			buf->file_priv = NULL;

			buf->dev_priv_size = dev->driver->dev_priv_size;
			buf->dev_private = kzalloc(buf->dev_priv_size,
						   GFP_KERNEL);
			if (!buf->dev_private) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				kfree(temp_pagelist);
				mutex_unlock(&dev->struct_mutex);
				atomic_dec(&dev->buf_alloc);
				return -ENOMEM;
			}

			DRM_DEBUG("buffer %d @ %p\n",
				  entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		kfree(temp_pagelist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	if (dma->page_count) {
		kfree(dma->pagelist);
	}
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	if (request->flags & _DRM_PCI_BUFFER_RO)
		dma->flags = _DRM_DMA_USE_PCI_RO;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_legacy_addbufs_pci);

static int drm_legacy_addbufs_sg(struct drm_device *dev,
				 struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_SG))
		return -EOPNOTSUPP;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	spin_lock(&dev->buf_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->buf_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset
					+ (unsigned long)dev->sg->virtual);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	atomic_dec(&dev->buf_alloc);
	return 0;
}

/*
 * Add buffers for DMA transfers (ioctl).
 *
 * \param dev DRM device.
 * \param data pointer to a struct drm_buf_desc request.
 * \param file_priv DRM file private.
 * \return zero on success or a negative number on failure.
 *
 * According to the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call either to addbufs_agp(),
 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
 * PCI memory respectively.
 */
int drm_legacy_addbufs(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_buf_desc *request = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EOPNOTSUPP;

#if IS_ENABLED(CONFIG_AGP)
	if (request->flags & _DRM_AGP_BUFFER)
		ret = drm_legacy_addbufs_agp(dev, request);
	else
#endif
	if (request->flags & _DRM_SG_BUFFER)
		ret = drm_legacy_addbufs_sg(dev, request);
	else if (request->flags & _DRM_FB_BUFFER)
		ret = -EINVAL;
	else
		ret = drm_legacy_addbufs_pci(dev, request);

	return ret;
}
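
/*
 * Userspace reaches drm_legacy_addbufs() through DRM_IOCTL_ADD_BUFS; a
 * minimal sketch using libdrm's wrapper (count and size are illustrative
 * assumptions):
 *
 *	if (drmAddBufs(fd, 32, 0x10000, DRM_AGP_BUFFER, agp_start) < 0)
 *		return -errno;
 *
 * Passing no type flag selects the consistent-PCI path,
 * drm_legacy_addbufs_pci().
 */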

/*
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param dev DRM device.
 * \param data pointer to a drm_buf_info structure.
 * \param p output count of buffer entries.
 * \param f callback that copies one drm_buf_entry to userspace.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::buf_lock
 * lock, preventing allocation of more buffers after this call. Information
 * about each requested buffer is then copied into user space.
 */
int __drm_legacy_infobufs(struct drm_device *dev,
			  void *data, int *p,
			  int (*f)(void *, int, struct drm_buf_entry *))
{
	struct drm_device_dma *dma = dev->dma;
	int i;
	int count;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EOPNOTSUPP;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->buf_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	++dev->buf_use;
	spin_unlock(&dev->buf_lock);

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (*p >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			struct drm_buf_entry *from = &dma->bufs[i];

			if (from->buf_count) {
				if (f(data, count, from) < 0)
					return -EFAULT;
				DRM_DEBUG("%d %d %d %d %d\n",
					  i,
					  dma->bufs[i].buf_count,
					  dma->bufs[i].buf_size,
					  dma->bufs[i].low_mark,
					  dma->bufs[i].high_mark);
				++count;
			}
		}
	}
	*p = count;

	return 0;
}

static int copy_one_buf(void *data, int count, struct drm_buf_entry *from)
{
	struct drm_buf_info *request = data;
	struct drm_buf_desc __user *to = &request->list[count];
	struct drm_buf_desc v = {.count = from->buf_count,
				 .size = from->buf_size,
				 .low_mark = from->low_mark,
				 .high_mark = from->high_mark};

	if (copy_to_user(to, &v, offsetof(struct drm_buf_desc, flags)))
		return -EFAULT;
	return 0;
}

int drm_legacy_infobufs(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_buf_info *request = data;

	return __drm_legacy_infobufs(dev, data, &request->count, copy_one_buf);
}
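
/*
 * Userspace-side sketch (hypothetical), using libdrm's wrapper around
 * DRM_IOCTL_INFO_BUFS to inspect the pools created by drmAddBufs():
 *
 *	drmBufInfoPtr info = drmGetBufInfo(fd);
 *
 *	for (int i = 0; info && i < info->count; i++)
 *		printf("%d bufs of %d bytes\n",
 *		       info->list[i].count, info->list[i].size);
 */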

/*
 * Specifies a low and high water mark for buffer allocation.
 *
 * \param dev DRM device.
 * \param data pointer to a drm_buf_desc structure.
 * \param file_priv DRM file private.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry low and high water mark.
 *
 * \note This ioctl is deprecated and mostly never used.
 */
int drm_legacy_markbufs(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_desc *request = data;
	int order;
	struct drm_buf_entry *entry;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EOPNOTSUPP;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d, %d, %d\n",
		  request->size, request->low_mark, request->high_mark);
	order = order_base_2(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	entry = &dma->bufs[order];

	if (request->low_mark < 0 || request->low_mark > entry->buf_count)
		return -EINVAL;
	if (request->high_mark < 0 || request->high_mark > entry->buf_count)
		return -EINVAL;

	entry->low_mark = request->low_mark;
	entry->high_mark = request->high_mark;

	return 0;
}
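
/*
 * Userspace-side sketch (hypothetical): libdrm's drmMarkBufs() drives
 * this ioctl once per buffer pool, expressing the watermarks as
 * fractions of each pool's buffer count:
 *
 *	drmMarkBufs(fd, 0.25, 0.75);
 */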

/*
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param dev DRM device.
 * \param data pointer to a drm_buf_free structure.
 * \param file_priv DRM file private.
 * \return zero on success or a negative number on failure.
 *
 * Calls drm_legacy_free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_legacy_freebufs(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_free *request = data;
	int i;
	int idx;
	struct drm_buf *buf;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EOPNOTSUPP;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d\n", request->count);
	for (i = 0; i < request->count; i++) {
		if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
			return -EFAULT;
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  idx, dma->buf_count - 1);
			return -EINVAL;
		}
		idx = array_index_nospec(idx, dma->buf_count);
		buf = dma->buflist[idx];
		if (buf->file_priv != file_priv) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
				  task_pid_nr(current));
			return -EINVAL;
		}
		drm_legacy_free_buffer(dev, buf);
	}

	return 0;
}
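
/*
 * Userspace-side sketch (hypothetical): releasing two reserved buffers,
 * identified by the indices handed out by drmDMA()/drmMapBufs(), through
 * libdrm's wrapper for DRM_IOCTL_FREE_BUFS:
 *
 *	int list[2] = { idx0, idx1 };
 *
 *	drmFreeBufs(fd, 2, list);
 */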

/*
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param dev DRM device.
 * \param data pointer to a drm_buf_map structure.
 * \param p output count of mapped buffers.
 * \param v output client-virtual address of the mapping.
 * \param f callback that copies one buffer's details to userspace.
 * \param file_priv DRM file private.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
 * about each buffer into user space.  For PCI buffers, it calls vm_mmap() with
 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
 * drm_mmap_dma().
 */
int __drm_legacy_mapbufs(struct drm_device *dev, void *data, int *p,
			 void __user **v,
			 int (*f)(void *, int, unsigned long,
				  struct drm_buf *),
			 struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	int retcode = 0;
	unsigned long virtual;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EOPNOTSUPP;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->buf_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	dev->buf_use++;
	spin_unlock(&dev->buf_lock);

	if (*p >= dma->buf_count) {
		if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP))
		    || (drm_core_check_feature(dev, DRIVER_SG)
			&& (dma->flags & _DRM_DMA_USE_SG))) {
			struct drm_local_map *map = dev->agp_buffer_map;
			unsigned long token = dev->agp_buffer_token;

			if (!map) {
				retcode = -EINVAL;
				goto done;
			}
			virtual = vm_mmap(file_priv->filp, 0, map->size,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED,
					  token);
		} else {
			virtual = vm_mmap(file_priv->filp, 0, dma->byte_count,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED, 0);
		}
		if (virtual > -1024UL) {
			/* Real error */
			retcode = (signed long)virtual;
			goto done;
		}
		*v = (void __user *)virtual;

		for (i = 0; i < dma->buf_count; i++) {
			if (f(data, i, virtual, dma->buflist[i]) < 0) {
				retcode = -EFAULT;
				goto done;
			}
		}
	}
done:
	*p = dma->buf_count;
	DRM_DEBUG("%d buffers, retcode = %d\n", *p, retcode);

	return retcode;
}

static int map_one_buf(void *data, int idx, unsigned long virtual,
		       struct drm_buf *buf)
{
	struct drm_buf_map *request = data;
	unsigned long address = virtual + buf->offset;

	if (copy_to_user(&request->list[idx].idx, &buf->idx,
			 sizeof(request->list[0].idx)))
		return -EFAULT;
	if (copy_to_user(&request->list[idx].total, &buf->total,
			 sizeof(request->list[0].total)))
		return -EFAULT;
	if (clear_user(&request->list[idx].used, sizeof(int)))
		return -EFAULT;
	if (copy_to_user(&request->list[idx].address, &address,
			 sizeof(address)))
		return -EFAULT;
	return 0;
}

int drm_legacy_mapbufs(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_buf_map *request = data;

	return __drm_legacy_mapbufs(dev, data, &request->count,
				    &request->virtual, map_one_buf,
				    file_priv);
}
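
/*
 * Userspace-side sketch (hypothetical): libdrm's drmMapBufs() invokes
 * this ioctl and returns the per-buffer client addresses filled in by
 * map_one_buf():
 *
 *	drmBufMapPtr bufs = drmMapBufs(fd);
 *
 *	if (bufs)
 *		memcpy(bufs->list[0].address, vertices, len);
 */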

int drm_legacy_dma_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	if (dev->driver->dma_ioctl)
		return dev->driver->dma_ioctl(dev, data, file_priv);
	else
		return -EINVAL;
}

struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev)
{
	struct drm_map_list *entry;

	list_for_each_entry(entry, &dev->maplist, head) {
		if (entry->map && entry->map->type == _DRM_SHM &&
		    (entry->map->flags & _DRM_CONTAINS_LOCK)) {
			return entry->map;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(drm_legacy_getsarea);
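
/*
 * Usage sketch (hypothetical): a legacy driver locating the SAREA, i.e.
 * the _DRM_SHM map created with _DRM_CONTAINS_LOCK, for example during
 * its lastclose handling:
 *
 *	struct drm_local_map *sarea = drm_legacy_getsarea(dev);
 *
 *	if (sarea)
 *		memset(sarea->handle, 0, sarea->size);
 */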