0001
0002
0003
0004
0005
0006
0007
0008
0009 #include <linux/dma-mapping.h>
0010 #include <linux/kernel.h>
0011 #include <linux/export.h>
0012 #include <linux/memblock.h>
0013 #include <linux/slab.h>
0014
0015 #include <asm/cell-regs.h>
0016 #include <asm/firmware.h>
0017 #include <asm/udbg.h>
0018 #include <asm/lv1call.h>
0019 #include <asm/setup.h>
0020
0021 #include "platform.h"
0022
/* Debug output: route to udbg when DEBUG is defined, pr_devel otherwise. */
#if defined(DEBUG)
#define DBG udbg_printf
#else
#define DBG pr_devel
#endif
0028
/* Mirror CONFIG_PS3_DYNAMIC_DMA as a plain integer constant so it can be
 * used in ordinary C expressions (see ps3_dma_region_init()). */
enum {
#if defined(CONFIG_PS3_DYNAMIC_DMA)
	USE_DYNAMIC_DMA = 1,
#else
	USE_DYNAMIC_DMA = 0,
#endif
};
0036
/* Page-size exponents (log2 of the page size) passed to the hypervisor. */
enum {
	PAGE_SHIFT_4K = 12U,
	PAGE_SHIFT_64K = 16U,
	PAGE_SHIFT_16M = 24U,
};
0042
/* Pack two page-size exponents into the page-sizes argument expected by
 * lv1_construct_virtual_address_space() (bits 56-63 and 48-55). */
static unsigned long __init make_page_sizes(unsigned long a, unsigned long b)
{
	return (a << 56) | (b << 48);
}
0047
/* Flag bits for lv1_allocate_memory() (names follow the HV interface). */
enum {
	ALLOCATE_MEMORY_TRY_ALT_UNIT = 0x04,
	ALLOCATE_MEMORY_ADDR_ZERO = 0x08,
};
0052
0053
0054
/* Allowed log2 range for the hash page table size; CONFIG_PS3_HTAB_SIZE is
 * checked against these with BUILD_BUG_ON in ps3_mm_vas_create(). */
enum {
	HTAB_SIZE_MAX = 20U,
	HTAB_SIZE_MIN = 18U,
};
0059
0060
0061
0062
0063
0064
0065
0066
0067
0068
0069
0070
0071
/**
 * struct mem_region - memory region descriptor
 * @base: base lpar address of the region
 * @size: region size in bytes
 * @offset: difference between @base and map.rm.size (see
 *          ps3_mm_region_create())
 * @destroy: when set, the region is released at shutdown by
 *           ps3_mm_region_destroy()
 */
struct mem_region {
	u64 base;
	u64 size;
	unsigned long offset;
	int destroy;
};
0078
0079
0080
0081
0082
0083
0084
0085
0086
0087
0088
0089
0090
0091
0092
0093
0094
0095
0096
/**
 * struct map - address space state
 * @total: total memory in bytes (rm.size + r1.size, see ps3_mm_init())
 * @vas_id: hv virtual address space id; 0 when no vas is constructed
 * @htab_size: size in bytes of the hash page table the hv allocated
 * @rm: real mode (boot) memory region
 * @r1: highmem (extended) memory region
 */
struct map {
	u64 total;
	u64 vas_id;
	u64 htab_size;
	struct mem_region rm;
	struct mem_region r1;
};
0104
/* Dump all fields of a struct map via DBG, tagged with caller and line. */
#define debug_dump_map(x) _debug_dump_map(x, __func__, __LINE__)
static void __maybe_unused _debug_dump_map(const struct map *m,
	const char *func, int line)
{
	DBG("%s:%d: map.total = %llxh\n", func, line, m->total);
	DBG("%s:%d: map.rm.size = %llxh\n", func, line, m->rm.size);
	DBG("%s:%d: map.vas_id = %llu\n", func, line, m->vas_id);
	DBG("%s:%d: map.htab_size = %llxh\n", func, line, m->htab_size);
	DBG("%s:%d: map.r1.base = %llxh\n", func, line, m->r1.base);
	DBG("%s:%d: map.r1.offset = %lxh\n", func, line, m->r1.offset);
	DBG("%s:%d: map.r1.size = %llxh\n", func, line, m->r1.size);
}
0117
0118 static struct map map;
0119
0120
0121
0122
0123
0124
0125 unsigned long ps3_mm_phys_to_lpar(unsigned long phys_addr)
0126 {
0127 BUG_ON(is_kernel_addr(phys_addr));
0128 return (phys_addr < map.rm.size || phys_addr >= map.total)
0129 ? phys_addr : phys_addr + map.r1.offset;
0130 }
0131
0132 EXPORT_SYMBOL(ps3_mm_phys_to_lpar);
0133
0134
0135
0136
0137
/**
 * ps3_mm_vas_create - construct a virtual address space and select it
 * @htab_size: receives the size in bytes of the htab the hv allocated
 *
 * Queries the lpar address region, constructs a vas supporting 16M and
 * 64K pages, and makes it current.  Panics on any failure: the kernel
 * cannot continue without an address space.
 */
void __init ps3_mm_vas_create(unsigned long* htab_size)
{
	int result;
	u64 start_address;
	u64 size;
	u64 access_right;
	u64 max_page_size;
	u64 flags;

	result = lv1_query_logical_partition_address_region_info(0,
		&start_address, &size, &access_right, &max_page_size,
		&flags);

	if (result) {
		DBG("%s:%d: lv1_query_logical_partition_address_region_info "
			"failed: %s\n", __func__, __LINE__,
			ps3_result(result));
		goto fail;
	}

	/* 16M pages are requested below, so the hv must support them. */
	if (max_page_size < PAGE_SHIFT_16M) {
		DBG("%s:%d: bad max_page_size %llxh\n", __func__, __LINE__,
			max_page_size);
		goto fail;
	}

	/* Keep the configured htab size inside the supported range. */
	BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE > HTAB_SIZE_MAX);
	BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE < HTAB_SIZE_MIN);

	result = lv1_construct_virtual_address_space(CONFIG_PS3_HTAB_SIZE,
			2, make_page_sizes(PAGE_SHIFT_16M, PAGE_SHIFT_64K),
			&map.vas_id, &map.htab_size);

	if (result) {
		DBG("%s:%d: lv1_construct_virtual_address_space failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		goto fail;
	}

	result = lv1_select_virtual_address_space(map.vas_id);

	if (result) {
		DBG("%s:%d: lv1_select_virtual_address_space failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		goto fail;
	}

	*htab_size = map.htab_size;

	debug_dump_map(&map);

	return;

fail:
	panic("ps3_mm_vas_create failed");
}
0194
0195
0196
0197
0198
0199
0200
0201 notrace void ps3_mm_vas_destroy(void)
0202 {
0203 int result;
0204
0205 if (map.vas_id) {
0206 result = lv1_select_virtual_address_space(0);
0207 result += lv1_destruct_virtual_address_space(map.vas_id);
0208
0209 if (result) {
0210 lv1_panic(0);
0211 }
0212
0213 map.vas_id = 0;
0214 }
0215 }
0216
0217 static int __init ps3_mm_get_repository_highmem(struct mem_region *r)
0218 {
0219 int result;
0220
0221
0222
0223 result = ps3_repository_read_highmem_info(0, &r->base, &r->size);
0224
0225 if (result)
0226 goto zero_region;
0227
0228 if (!r->base || !r->size) {
0229 result = -1;
0230 goto zero_region;
0231 }
0232
0233 r->offset = r->base - map.rm.size;
0234
0235 DBG("%s:%d: Found high region in repository: %llxh %llxh\n",
0236 __func__, __LINE__, r->base, r->size);
0237
0238 return 0;
0239
0240 zero_region:
0241 DBG("%s:%d: No high region in repository.\n", __func__, __LINE__);
0242
0243 r->size = r->base = r->offset = 0;
0244 return result;
0245 }
0246
0247 static int ps3_mm_set_repository_highmem(const struct mem_region *r)
0248 {
0249
0250
0251 return r ? ps3_repository_write_highmem_info(0, r->base, r->size) :
0252 ps3_repository_write_highmem_info(0, 0, 0);
0253 }
0254
0255
0256
0257
0258
0259
0260
0261
0262
0263
/**
 * ps3_mm_region_create - allocate a memory region from the hypervisor
 * @r: region to fill in
 * @size: requested size in bytes
 *
 * The size is rounded down to a multiple of 16M, the allocation unit.
 * On success @r->destroy is set so ps3_mm_region_destroy() will release
 * it, and @r->offset records its distance above the real-mode region.
 * On failure @r is zeroed and a non-zero value is returned.
 */
static int ps3_mm_region_create(struct mem_region *r, unsigned long size)
{
	int result;
	u64 muid;

	r->size = ALIGN_DOWN(size, 1 << PAGE_SHIFT_16M);

	DBG("%s:%d requested %lxh\n", __func__, __LINE__, size);
	DBG("%s:%d actual %llxh\n", __func__, __LINE__, r->size);
	DBG("%s:%d difference %llxh (%lluMB)\n", __func__, __LINE__,
		size - r->size, (size - r->size) / 1024 / 1024);

	if (r->size == 0) {
		DBG("%s:%d: size == 0\n", __func__, __LINE__);
		result = -1;
		goto zero_region;
	}

	result = lv1_allocate_memory(r->size, PAGE_SHIFT_16M, 0,
		ALLOCATE_MEMORY_TRY_ALT_UNIT, &r->base, &muid);

	/* An allocation below the real-mode boundary is also a failure. */
	if (result || r->base < map.rm.size) {
		DBG("%s:%d: lv1_allocate_memory failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		goto zero_region;
	}

	r->destroy = 1;
	r->offset = r->base - map.rm.size;
	return result;

zero_region:
	r->size = r->base = r->offset = 0;
	return result;
}
0299
0300
0301
0302
0303
0304
0305 static void ps3_mm_region_destroy(struct mem_region *r)
0306 {
0307 int result;
0308
0309 if (!r->destroy) {
0310 return;
0311 }
0312
0313 if (r->base) {
0314 result = lv1_release_memory(r->base);
0315
0316 if (result) {
0317 lv1_panic(0);
0318 }
0319
0320 r->size = r->base = r->offset = 0;
0321 map.total = map.rm.size;
0322 }
0323
0324 ps3_mm_set_repository_highmem(NULL);
0325 }
0326
0327
0328
0329
0330
0331
0332
0333
0334
0335
0336
0337 static unsigned long dma_sb_lpar_to_bus(struct ps3_dma_region *r,
0338 unsigned long lpar_addr)
0339 {
0340 if (lpar_addr >= map.rm.size)
0341 lpar_addr -= map.r1.offset;
0342 BUG_ON(lpar_addr < r->offset);
0343 BUG_ON(lpar_addr >= r->offset + r->len);
0344 return r->bus_addr + lpar_addr - r->offset;
0345 }
0346
/* Dump the fields of a ps3_dma_region via DBG, tagged with caller/line. */
#define dma_dump_region(_a) _dma_dump_region(_a, __func__, __LINE__)
static void __maybe_unused _dma_dump_region(const struct ps3_dma_region *r,
	const char *func, int line)
{
	DBG("%s:%d: dev %llu:%llu\n", func, line, r->dev->bus_id,
		r->dev->dev_id);
	DBG("%s:%d: page_size %u\n", func, line, r->page_size);
	DBG("%s:%d: bus_addr %lxh\n", func, line, r->bus_addr);
	DBG("%s:%d: len %lxh\n", func, line, r->len);
	DBG("%s:%d: offset %lxh\n", func, line, r->offset);
}
0358
0359
0360
0361
0362
0363
0364
0365
0366
0367
0368
0369
0370
0371
0372
/**
 * struct dma_chunk - a mapped run of dma pages within a region
 * @region: owning dma region
 * @lpar_addr: starting lpar address of the chunk
 * @bus_addr: starting ioc bus address of the chunk
 * @len: chunk length in bytes
 * @link: node on region->chunk_list.head
 * @usage_count: number of outstanding map requests sharing this chunk;
 *               the chunk is freed when it drops to zero
 */
struct dma_chunk {
	struct ps3_dma_region *region;
	unsigned long lpar_addr;
	unsigned long bus_addr;
	unsigned long len;
	struct list_head link;
	unsigned int usage_count;
};
0381
/* Dump a dma_chunk and its owning region via DBG, tagged with caller/line. */
#define dma_dump_chunk(_a) _dma_dump_chunk(_a, __func__, __LINE__)
static void _dma_dump_chunk (const struct dma_chunk* c, const char* func,
	int line)
{
	DBG("%s:%d: r.dev %llu:%llu\n", func, line,
		c->region->dev->bus_id, c->region->dev->dev_id);
	DBG("%s:%d: r.bus_addr %lxh\n", func, line, c->region->bus_addr);
	DBG("%s:%d: r.page_size %u\n", func, line, c->region->page_size);
	DBG("%s:%d: r.len %lxh\n", func, line, c->region->len);
	DBG("%s:%d: r.offset %lxh\n", func, line, c->region->offset);
	DBG("%s:%d: c.lpar_addr %lxh\n", func, line, c->lpar_addr);
	DBG("%s:%d: c.bus_addr %lxh\n", func, line, c->bus_addr);
	DBG("%s:%d: c.len %lxh\n", func, line, c->len);
}
0396
0397 static struct dma_chunk * dma_find_chunk(struct ps3_dma_region *r,
0398 unsigned long bus_addr, unsigned long len)
0399 {
0400 struct dma_chunk *c;
0401 unsigned long aligned_bus = ALIGN_DOWN(bus_addr, 1 << r->page_size);
0402 unsigned long aligned_len = ALIGN(len+bus_addr-aligned_bus,
0403 1 << r->page_size);
0404
0405 list_for_each_entry(c, &r->chunk_list.head, link) {
0406
0407 if (aligned_bus >= c->bus_addr &&
0408 aligned_bus + aligned_len <= c->bus_addr + c->len)
0409 return c;
0410
0411
0412 if (aligned_bus + aligned_len <= c->bus_addr)
0413 continue;
0414
0415
0416 if (aligned_bus >= c->bus_addr + c->len)
0417 continue;
0418
0419
0420 dma_dump_chunk(c);
0421 BUG();
0422 }
0423 return NULL;
0424 }
0425
0426 static struct dma_chunk *dma_find_chunk_lpar(struct ps3_dma_region *r,
0427 unsigned long lpar_addr, unsigned long len)
0428 {
0429 struct dma_chunk *c;
0430 unsigned long aligned_lpar = ALIGN_DOWN(lpar_addr, 1 << r->page_size);
0431 unsigned long aligned_len = ALIGN(len + lpar_addr - aligned_lpar,
0432 1 << r->page_size);
0433
0434 list_for_each_entry(c, &r->chunk_list.head, link) {
0435
0436 if (c->lpar_addr <= aligned_lpar &&
0437 aligned_lpar < c->lpar_addr + c->len) {
0438 if (aligned_lpar + aligned_len <= c->lpar_addr + c->len)
0439 return c;
0440 else {
0441 dma_dump_chunk(c);
0442 BUG();
0443 }
0444 }
0445
0446 if (aligned_lpar + aligned_len <= c->lpar_addr) {
0447 continue;
0448 }
0449
0450 if (c->lpar_addr + c->len <= aligned_lpar) {
0451 continue;
0452 }
0453 }
0454 return NULL;
0455 }
0456
0457 static int dma_sb_free_chunk(struct dma_chunk *c)
0458 {
0459 int result = 0;
0460
0461 if (c->bus_addr) {
0462 result = lv1_unmap_device_dma_region(c->region->dev->bus_id,
0463 c->region->dev->dev_id, c->bus_addr, c->len);
0464 BUG_ON(result);
0465 }
0466
0467 kfree(c);
0468 return result;
0469 }
0470
0471 static int dma_ioc0_free_chunk(struct dma_chunk *c)
0472 {
0473 int result = 0;
0474 int iopage;
0475 unsigned long offset;
0476 struct ps3_dma_region *r = c->region;
0477
0478 DBG("%s:start\n", __func__);
0479 for (iopage = 0; iopage < (c->len >> r->page_size); iopage++) {
0480 offset = (1 << r->page_size) * iopage;
0481
0482 result = lv1_put_iopte(0,
0483 c->bus_addr + offset,
0484 c->lpar_addr + offset,
0485 r->ioid,
0486 0);
0487 DBG("%s: bus=%#lx, lpar=%#lx, ioid=%d\n", __func__,
0488 c->bus_addr + offset,
0489 c->lpar_addr + offset,
0490 r->ioid);
0491
0492 if (result) {
0493 DBG("%s:%d: lv1_put_iopte failed: %s\n", __func__,
0494 __LINE__, ps3_result(result));
0495 }
0496 }
0497 kfree(c);
0498 DBG("%s:end\n", __func__);
0499 return result;
0500 }
0501
0502
0503
0504
0505
0506
0507
0508
0509
0510
0511
0512
/**
 * dma_sb_map_pages - allocate a chunk and map it through the sb iommu
 * @r: dma region
 * @phys_addr: starting physical address (page aligned by the caller)
 * @len: length in bytes (page aligned by the caller)
 * @c_out: receives the new chunk, or NULL on failure
 * @iopte_flag: iopte protection flags; only the full RW value is accepted
 *
 * The caller holds r->chunk_list.lock; GFP_ATOMIC is used for that reason.
 * On success the chunk is added to the head of the region's chunk list.
 */
static int dma_sb_map_pages(struct ps3_dma_region *r, unsigned long phys_addr,
	unsigned long len, struct dma_chunk **c_out, u64 iopte_flag)
{
	int result;
	struct dma_chunk *c;

	c = kzalloc(sizeof(*c), GFP_ATOMIC);
	if (!c) {
		result = -ENOMEM;
		goto fail_alloc;
	}

	c->region = r;
	c->lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
	c->bus_addr = dma_sb_lpar_to_bus(r, c->lpar_addr);
	c->len = len;

	/* Only the full read/write/coherent flag set is supported here. */
	BUG_ON(iopte_flag != 0xf800000000000000UL);
	result = lv1_map_device_dma_region(c->region->dev->bus_id,
		c->region->dev->dev_id, c->lpar_addr,
		c->bus_addr, c->len, iopte_flag);
	if (result) {
		DBG("%s:%d: lv1_map_device_dma_region failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		goto fail_map;
	}

	list_add(&c->link, &r->chunk_list.head);

	*c_out = c;
	return 0;

fail_map:
	kfree(c);
fail_alloc:
	*c_out = NULL;
	DBG(" <- %s:%d\n", __func__, __LINE__);
	return result;
}
0552
0553 static int dma_ioc0_map_pages(struct ps3_dma_region *r, unsigned long phys_addr,
0554 unsigned long len, struct dma_chunk **c_out,
0555 u64 iopte_flag)
0556 {
0557 int result;
0558 struct dma_chunk *c, *last;
0559 int iopage, pages;
0560 unsigned long offset;
0561
0562 DBG(KERN_ERR "%s: phy=%#lx, lpar%#lx, len=%#lx\n", __func__,
0563 phys_addr, ps3_mm_phys_to_lpar(phys_addr), len);
0564 c = kzalloc(sizeof(*c), GFP_ATOMIC);
0565 if (!c) {
0566 result = -ENOMEM;
0567 goto fail_alloc;
0568 }
0569
0570 c->region = r;
0571 c->len = len;
0572 c->lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
0573
0574 if (list_empty(&r->chunk_list.head)) {
0575
0576 c->bus_addr = r->bus_addr;
0577 } else {
0578
0579 last = list_entry(r->chunk_list.head.next,
0580 struct dma_chunk, link);
0581 c->bus_addr = last->bus_addr + last->len;
0582 DBG("%s: last bus=%#lx, len=%#lx\n", __func__,
0583 last->bus_addr, last->len);
0584 }
0585
0586
0587
0588
0589 pages = len >> r->page_size;
0590 DBG("%s: pgsize=%#x len=%#lx pages=%#x iopteflag=%#llx\n", __func__,
0591 r->page_size, r->len, pages, iopte_flag);
0592 for (iopage = 0; iopage < pages; iopage++) {
0593 offset = (1 << r->page_size) * iopage;
0594 result = lv1_put_iopte(0,
0595 c->bus_addr + offset,
0596 c->lpar_addr + offset,
0597 r->ioid,
0598 iopte_flag);
0599 if (result) {
0600 pr_warn("%s:%d: lv1_put_iopte failed: %s\n",
0601 __func__, __LINE__, ps3_result(result));
0602 goto fail_map;
0603 }
0604 DBG("%s: pg=%d bus=%#lx, lpar=%#lx, ioid=%#x\n", __func__,
0605 iopage, c->bus_addr + offset, c->lpar_addr + offset,
0606 r->ioid);
0607 }
0608
0609
0610 list_add(&c->link, &r->chunk_list.head);
0611
0612 *c_out = c;
0613 DBG("%s: end\n", __func__);
0614 return 0;
0615
0616 fail_map:
0617 for (iopage--; 0 <= iopage; iopage--) {
0618 lv1_put_iopte(0,
0619 c->bus_addr + offset,
0620 c->lpar_addr + offset,
0621 r->ioid,
0622 0);
0623 }
0624 kfree(c);
0625 fail_alloc:
0626 *c_out = NULL;
0627 return result;
0628 }
0629
0630
0631
0632
0633
0634
0635
0636
0637
/**
 * dma_sb_region_create - allocate the device dma region from the hypervisor
 * @r: dma region to create
 *
 * Devices with a zero bus_id have no dma and succeed trivially.  The hv
 * allocation length is rounded up to a power of two.  On failure the
 * region's len and bus_addr are zeroed.
 */
static int dma_sb_region_create(struct ps3_dma_region *r)
{
	int result;
	u64 bus_addr;

	DBG(" -> %s:%d:\n", __func__, __LINE__);

	BUG_ON(!r);

	if (!r->dev->bus_id) {
		pr_info("%s:%d: %llu:%llu no dma\n", __func__, __LINE__,
			r->dev->bus_id, r->dev->dev_id);
		return 0;
	}

	DBG("%s:%u: len = 0x%lx, page_size = %u, offset = 0x%lx\n", __func__,
		__LINE__, r->len, r->page_size, r->offset);

	BUG_ON(!r->len);
	BUG_ON(!r->page_size);
	BUG_ON(!r->region_ops);

	INIT_LIST_HEAD(&r->chunk_list.head);
	spin_lock_init(&r->chunk_list.lock);

	result = lv1_allocate_device_dma_region(r->dev->bus_id, r->dev->dev_id,
		roundup_pow_of_two(r->len), r->page_size, r->region_type,
		&bus_addr);
	r->bus_addr = bus_addr;

	if (result) {
		DBG("%s:%d: lv1_allocate_device_dma_region failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		r->len = r->bus_addr = 0;
	}

	return result;
}
0676
/**
 * dma_ioc0_region_create - allocate an ioc0 io segment from the hypervisor
 * @r: dma region to create
 *
 * On failure the region's len and bus_addr are zeroed.
 */
static int dma_ioc0_region_create(struct ps3_dma_region *r)
{
	int result;
	u64 bus_addr;

	INIT_LIST_HEAD(&r->chunk_list.head);
	spin_lock_init(&r->chunk_list.lock);

	result = lv1_allocate_io_segment(0,
		r->len,
		r->page_size,
		&bus_addr);
	r->bus_addr = bus_addr;
	if (result) {
		DBG("%s:%d: lv1_allocate_io_segment failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		r->len = r->bus_addr = 0;
	}
	DBG("%s: len=%#lx, pg=%d, bus=%#lx\n", __func__,
		r->len, r->page_size, r->bus_addr);
	return result;
}
0699
0700
0701
0702
0703
0704
0705
0706
0707
0708 static int dma_sb_region_free(struct ps3_dma_region *r)
0709 {
0710 int result;
0711 struct dma_chunk *c;
0712 struct dma_chunk *tmp;
0713
0714 BUG_ON(!r);
0715
0716 if (!r->dev->bus_id) {
0717 pr_info("%s:%d: %llu:%llu no dma\n", __func__, __LINE__,
0718 r->dev->bus_id, r->dev->dev_id);
0719 return 0;
0720 }
0721
0722 list_for_each_entry_safe(c, tmp, &r->chunk_list.head, link) {
0723 list_del(&c->link);
0724 dma_sb_free_chunk(c);
0725 }
0726
0727 result = lv1_free_device_dma_region(r->dev->bus_id, r->dev->dev_id,
0728 r->bus_addr);
0729
0730 if (result)
0731 DBG("%s:%d: lv1_free_device_dma_region failed: %s\n",
0732 __func__, __LINE__, ps3_result(result));
0733
0734 r->bus_addr = 0;
0735
0736 return result;
0737 }
0738
0739 static int dma_ioc0_region_free(struct ps3_dma_region *r)
0740 {
0741 int result;
0742 struct dma_chunk *c, *n;
0743
0744 DBG("%s: start\n", __func__);
0745 list_for_each_entry_safe(c, n, &r->chunk_list.head, link) {
0746 list_del(&c->link);
0747 dma_ioc0_free_chunk(c);
0748 }
0749
0750 result = lv1_release_io_segment(0, r->bus_addr);
0751
0752 if (result)
0753 DBG("%s:%d: lv1_free_device_dma_region failed: %s\n",
0754 __func__, __LINE__, ps3_result(result));
0755
0756 r->bus_addr = 0;
0757 DBG("%s: end\n", __func__);
0758
0759 return result;
0760 }
0761
0762
0763
0764
0765
0766
0767
0768
0769
0770
0771
0772
/**
 * dma_sb_map_area - map an area of memory into a device dma region
 * @r: dma region
 * @virt_addr: kernel virtual or physical start address of the area
 * @len: length in bytes
 * @bus_addr: receives the ioc bus address of the start of the area
 * @iopte_flag: iopte protection flags
 *
 * The area is widened to page boundaries before mapping.  If a chunk
 * already covers the area its usage count is bumped instead of mapping
 * again.  All chunk-list access is under r->chunk_list.lock.
 */
static int dma_sb_map_area(struct ps3_dma_region *r, unsigned long virt_addr,
	unsigned long len, dma_addr_t *bus_addr,
	u64 iopte_flag)
{
	int result;
	unsigned long flags;
	struct dma_chunk *c;
	unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
		: virt_addr;
	unsigned long aligned_phys = ALIGN_DOWN(phys_addr, 1 << r->page_size);
	unsigned long aligned_len = ALIGN(len + phys_addr - aligned_phys,
		1 << r->page_size);
	/* The bus address can be computed up front; the mapping below only
	 * installs the iommu entries backing it. */
	*bus_addr = dma_sb_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));

	if (!USE_DYNAMIC_DMA) {
		unsigned long lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
		DBG(" -> %s:%d\n", __func__, __LINE__);
		DBG("%s:%d virt_addr %lxh\n", __func__, __LINE__,
			virt_addr);
		DBG("%s:%d phys_addr %lxh\n", __func__, __LINE__,
			phys_addr);
		DBG("%s:%d lpar_addr %lxh\n", __func__, __LINE__,
			lpar_addr);
		DBG("%s:%d len %lxh\n", __func__, __LINE__, len);
		DBG("%s:%d bus_addr %llxh (%lxh)\n", __func__, __LINE__,
			*bus_addr, len);
	}

	spin_lock_irqsave(&r->chunk_list.lock, flags);
	c = dma_find_chunk(r, *bus_addr, len);

	if (c) {
		/* Area already mapped: share the existing chunk. */
		DBG("%s:%d: reusing mapped chunk", __func__, __LINE__);
		dma_dump_chunk(c);
		c->usage_count++;
		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
		return 0;
	}

	result = dma_sb_map_pages(r, aligned_phys, aligned_len, &c, iopte_flag);

	if (result) {
		*bus_addr = 0;
		DBG("%s:%d: dma_sb_map_pages failed (%d)\n",
			__func__, __LINE__, result);
		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
		return result;
	}

	c->usage_count = 1;

	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
	return result;
}
0827
/**
 * dma_ioc0_map_area - map an area of memory into an ioc0 dma region
 * @r: dma region
 * @virt_addr: kernel virtual or physical start address of the area
 * @len: length in bytes
 * @bus_addr: receives the ioc bus address for the start of the area
 * @iopte_flag: iopte protection flags
 *
 * The area is widened to page boundaries before mapping.  All chunk-list
 * access is under r->chunk_list.lock.
 */
static int dma_ioc0_map_area(struct ps3_dma_region *r, unsigned long virt_addr,
	unsigned long len, dma_addr_t *bus_addr,
	u64 iopte_flag)
{
	int result;
	unsigned long flags;
	struct dma_chunk *c;
	unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
		: virt_addr;
	unsigned long aligned_phys = ALIGN_DOWN(phys_addr, 1 << r->page_size);
	unsigned long aligned_len = ALIGN(len + phys_addr - aligned_phys,
		1 << r->page_size);

	DBG(KERN_ERR "%s: vaddr=%#lx, len=%#lx\n", __func__,
		virt_addr, len);
	DBG(KERN_ERR "%s: ph=%#lx a_ph=%#lx a_l=%#lx\n", __func__,
		phys_addr, aligned_phys, aligned_len);

	spin_lock_irqsave(&r->chunk_list.lock, flags);
	c = dma_find_chunk_lpar(r, ps3_mm_phys_to_lpar(phys_addr), len);

	if (c) {
		/* The chunk-reuse path is deliberately trapped with BUG()
		 * here; the code after it is currently unreachable.
		 * NOTE(review): presumably reuse was never expected for
		 * ioc0 regions — confirm before enabling. */
		BUG();
		*bus_addr = c->bus_addr + phys_addr - aligned_phys;
		c->usage_count++;
		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
		return 0;
	}

	result = dma_ioc0_map_pages(r, aligned_phys, aligned_len, &c,
		iopte_flag);

	if (result) {
		*bus_addr = 0;
		DBG("%s:%d: dma_ioc0_map_pages failed (%d)\n",
			__func__, __LINE__, result);
		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
		return result;
	}
	*bus_addr = c->bus_addr + phys_addr - aligned_phys;
	DBG("%s: va=%#lx pa=%#lx a_pa=%#lx bus=%#llx\n", __func__,
		virt_addr, phys_addr, aligned_phys, *bus_addr);
	c->usage_count = 1;

	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
	return result;
}
0876
0877
0878
0879
0880
0881
0882
0883
0884
0885
/**
 * dma_sb_unmap_area - unmap an area of memory from a device dma region
 * @r: dma region
 * @bus_addr: ioc bus address returned by dma_sb_map_area()
 * @len: length in bytes
 *
 * Drops one reference on the covering chunk; the chunk is unmapped and
 * freed when its usage count reaches zero.  BUGs if no chunk covers the
 * given range.
 */
static int dma_sb_unmap_area(struct ps3_dma_region *r, dma_addr_t bus_addr,
	unsigned long len)
{
	unsigned long flags;
	struct dma_chunk *c;

	spin_lock_irqsave(&r->chunk_list.lock, flags);
	c = dma_find_chunk(r, bus_addr, len);

	if (!c) {
		unsigned long aligned_bus = ALIGN_DOWN(bus_addr,
			1 << r->page_size);
		unsigned long aligned_len = ALIGN(len + bus_addr
			- aligned_bus, 1 << r->page_size);
		DBG("%s:%d: not found: bus_addr %llxh\n",
			__func__, __LINE__, bus_addr);
		DBG("%s:%d: not found: len %lxh\n",
			__func__, __LINE__, len);
		DBG("%s:%d: not found: aligned_bus %lxh\n",
			__func__, __LINE__, aligned_bus);
		DBG("%s:%d: not found: aligned_len %lxh\n",
			__func__, __LINE__, aligned_len);
		BUG();
	}

	c->usage_count--;

	if (!c->usage_count) {
		list_del(&c->link);
		dma_sb_free_chunk(c);
	}

	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
	return 0;
}
0921
/**
 * dma_ioc0_unmap_area - unmap an area of memory from an ioc0 dma region
 * @r: dma region
 * @bus_addr: ioc bus address returned by dma_ioc0_map_area()
 * @len: length in bytes
 *
 * Drops one reference on the covering chunk; the chunk's ioptes are
 * cleared and it is freed when the usage count reaches zero.  BUGs if no
 * chunk covers the given range.
 */
static int dma_ioc0_unmap_area(struct ps3_dma_region *r,
	dma_addr_t bus_addr, unsigned long len)
{
	unsigned long flags;
	struct dma_chunk *c;

	DBG("%s: start a=%#llx l=%#lx\n", __func__, bus_addr, len);
	spin_lock_irqsave(&r->chunk_list.lock, flags);
	c = dma_find_chunk(r, bus_addr, len);

	if (!c) {
		unsigned long aligned_bus = ALIGN_DOWN(bus_addr,
			1 << r->page_size);
		unsigned long aligned_len = ALIGN(len + bus_addr
			- aligned_bus,
			1 << r->page_size);
		DBG("%s:%d: not found: bus_addr %llxh\n",
			__func__, __LINE__, bus_addr);
		DBG("%s:%d: not found: len %lxh\n",
			__func__, __LINE__, len);
		DBG("%s:%d: not found: aligned_bus %lxh\n",
			__func__, __LINE__, aligned_bus);
		DBG("%s:%d: not found: aligned_len %lxh\n",
			__func__, __LINE__, aligned_len);
		BUG();
	}

	c->usage_count--;

	if (!c->usage_count) {
		list_del(&c->link);
		dma_ioc0_free_chunk(c);
	}

	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
	DBG("%s: end\n", __func__);
	return 0;
}
0960
0961
0962
0963
0964
0965
0966
0967
0968
/**
 * dma_sb_region_create_linear - create a dma region with a linear mapping
 * @r: dma region to create
 *
 * Creates the region, then maps the whole area up front: the part that
 * falls inside the real-mode window and, separately, the part above it.
 * Regions larger than 16M are forced to 16M pages.  All failures BUG.
 */
static int dma_sb_region_create_linear(struct ps3_dma_region *r)
{
	int result;
	unsigned long virt_addr, len;
	dma_addr_t tmp;

	if (r->len > 16*1024*1024) {
		/* force 16M pages for large regions */
		if (r->page_size != PS3_DMA_16M) {
			pr_info("%s:%d: forcing 16M pages for linear map\n",
				__func__, __LINE__);
			r->page_size = PS3_DMA_16M;
			r->len = ALIGN(r->len, 1 << r->page_size);
		}
	}

	result = dma_sb_region_create(r);
	BUG_ON(result);

	if (r->offset < map.rm.size) {
		/* map the part of the region below the rm boundary */
		virt_addr = map.rm.base + r->offset;
		len = map.rm.size - r->offset;
		if (len > r->len)
			len = r->len;
		result = dma_sb_map_area(r, virt_addr, len, &tmp,
			CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_SO_RW |
			CBE_IOPTE_M);
		BUG_ON(result);
	}

	if (r->offset + r->len > map.rm.size) {
		/* map the part of the region above the rm boundary */
		virt_addr = map.rm.size;
		len = r->len;
		if (r->offset >= map.rm.size)
			virt_addr += r->offset - map.rm.size;
		else
			len -= map.rm.size - r->offset;
		result = dma_sb_map_area(r, virt_addr, len, &tmp,
			CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_SO_RW |
			CBE_IOPTE_M);
		BUG_ON(result);
	}

	return result;
}
1016
1017
1018
1019
1020
1021
1022
1023
/**
 * dma_sb_region_free_linear - free a dma region with a linear mapping
 * @r: dma region to free
 *
 * Unmaps the two areas mapped by dma_sb_region_create_linear() (below and
 * above the real-mode boundary), then frees the region.  All failures BUG.
 */
static int dma_sb_region_free_linear(struct ps3_dma_region *r)
{
	int result;
	dma_addr_t bus_addr;
	unsigned long len, lpar_addr;

	if (r->offset < map.rm.size) {
		/* unmap the part of the region below the rm boundary */
		lpar_addr = map.rm.base + r->offset;
		len = map.rm.size - r->offset;
		if (len > r->len)
			len = r->len;
		bus_addr = dma_sb_lpar_to_bus(r, lpar_addr);
		result = dma_sb_unmap_area(r, bus_addr, len);
		BUG_ON(result);
	}

	if (r->offset + r->len > map.rm.size) {
		/* unmap the part of the region above the rm boundary */
		lpar_addr = map.r1.base;
		len = r->len;
		if (r->offset >= map.rm.size)
			lpar_addr += r->offset - map.rm.size;
		else
			len -= map.rm.size - r->offset;
		bus_addr = dma_sb_lpar_to_bus(r, lpar_addr);
		result = dma_sb_unmap_area(r, bus_addr, len);
		BUG_ON(result);
	}

	result = dma_sb_region_free(r);
	BUG_ON(result);

	return result;
}
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072 static int dma_sb_map_area_linear(struct ps3_dma_region *r,
1073 unsigned long virt_addr, unsigned long len, dma_addr_t *bus_addr,
1074 u64 iopte_flag)
1075 {
1076 unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
1077 : virt_addr;
1078 *bus_addr = dma_sb_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));
1079 return 0;
1080 }
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091 static int dma_sb_unmap_area_linear(struct ps3_dma_region *r,
1092 dma_addr_t bus_addr, unsigned long len)
1093 {
1094 return 0;
1095 };
1096
/* Dynamic DMA ops for SB devices: per-area map/unmap with chunk tracking. */
static const struct ps3_dma_region_ops ps3_dma_sb_region_ops = {
	.create = dma_sb_region_create,
	.free = dma_sb_region_free,
	.map = dma_sb_map_area,
	.unmap = dma_sb_unmap_area
};
1103
/* Linear DMA ops for SB devices: the whole region is mapped at create
 * time, so per-area map/unmap reduce to address arithmetic / no-ops. */
static const struct ps3_dma_region_ops ps3_dma_sb_region_linear_ops = {
	.create = dma_sb_region_create_linear,
	.free = dma_sb_region_free_linear,
	.map = dma_sb_map_area_linear,
	.unmap = dma_sb_unmap_area_linear
};
1110
/* DMA ops for IOC0 devices: io-segment based, per-page iopte management. */
static const struct ps3_dma_region_ops ps3_dma_ioc0_region_ops = {
	.create = dma_ioc0_region_create,
	.free = dma_ioc0_region_free,
	.map = dma_ioc0_map_area,
	.unmap = dma_ioc0_unmap_area
};
1117
/**
 * ps3_dma_region_init - initialize a dma region for a system bus device
 * @dev: the system bus device the region belongs to
 * @r: dma region to initialize
 * @page_size: dma page size for the region
 * @region_type: hv region type
 * @addr: kernel virtual start address, or NULL to cover all of memory
 * @len: length in bytes, or 0 to derive it from map.total
 *
 * Selects the region ops by device type (SB devices get linear or dynamic
 * ops depending on USE_DYNAMIC_DMA) and sets a 32-bit dma mask.
 * Returns 0 on success or a negative error code.
 */
int ps3_dma_region_init(struct ps3_system_bus_device *dev,
	struct ps3_dma_region *r, enum ps3_dma_page_size page_size,
	enum ps3_dma_region_type region_type, void *addr, unsigned long len)
{
	unsigned long lpar_addr;
	int result;

	lpar_addr = addr ? ps3_mm_phys_to_lpar(__pa(addr)) : 0;

	r->dev = dev;
	r->page_size = page_size;
	r->region_type = region_type;
	r->offset = lpar_addr;
	/* fold highmem lpar addresses back into the linear window */
	if (r->offset >= map.rm.size)
		r->offset -= map.r1.offset;
	r->len = len ? len : ALIGN(map.total, 1 << r->page_size);

	dev->core.dma_mask = &r->dma_mask;

	result = dma_set_mask_and_coherent(&dev->core, DMA_BIT_MASK(32));

	if (result < 0) {
		dev_err(&dev->core, "%s:%d: dma_set_mask_and_coherent failed: %d\n",
			__func__, __LINE__, result);
		return result;
	}

	switch (dev->dev_type) {
	case PS3_DEVICE_TYPE_SB:
		r->region_ops =  (USE_DYNAMIC_DMA)
			? &ps3_dma_sb_region_ops
			: &ps3_dma_sb_region_linear_ops;
		break;
	case PS3_DEVICE_TYPE_IOC0:
		r->region_ops = &ps3_dma_ioc0_region_ops;
		break;
	default:
		BUG();
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL(ps3_dma_region_init);
1161
/* Create a dma region via its type-specific ops (set by
 * ps3_dma_region_init()). */
int ps3_dma_region_create(struct ps3_dma_region *r)
{
	BUG_ON(!r);
	BUG_ON(!r->region_ops);
	BUG_ON(!r->region_ops->create);
	return r->region_ops->create(r);
}
EXPORT_SYMBOL(ps3_dma_region_create);
1170
/* Free a dma region via its type-specific ops (set by
 * ps3_dma_region_init()). */
int ps3_dma_region_free(struct ps3_dma_region *r)
{
	BUG_ON(!r);
	BUG_ON(!r->region_ops);
	BUG_ON(!r->region_ops->free);
	return r->region_ops->free(r);
}
EXPORT_SYMBOL(ps3_dma_region_free);
1179
/* Map an area into a dma region via its type-specific ops. */
int ps3_dma_map(struct ps3_dma_region *r, unsigned long virt_addr,
	unsigned long len, dma_addr_t *bus_addr,
	u64 iopte_flag)
{
	return r->region_ops->map(r, virt_addr, len, bus_addr, iopte_flag);
}
1186
/* Unmap an area from a dma region via its type-specific ops. */
int ps3_dma_unmap(struct ps3_dma_region *r, dma_addr_t bus_addr,
	unsigned long len)
{
	return r->region_ops->unmap(r, bus_addr, len);
}
1192
1193
1194
1195
1196
1197
1198
1199
1200
/**
 * ps3_mm_init - initialize the address map and claim the highmem region
 *
 * Reads the real-mode region info from the repository, then either reuses
 * a highmem region previously recorded there or creates a new one and
 * records it.  Any highmem is added to memblock.  Panics if the basic
 * memory info cannot be read.
 */
void __init ps3_mm_init(void)
{
	int result;

	DBG(" -> %s:%d\n", __func__, __LINE__);

	result = ps3_repository_read_mm_info(&map.rm.base, &map.rm.size,
		&map.total);

	if (result)
		panic("ps3_repository_read_mm_info() failed");

	map.rm.offset = map.rm.base;
	map.vas_id = map.htab_size = 0;

	/* this implementation assumes map.rm.base is zero */

	BUG_ON(map.rm.base);
	BUG_ON(!map.rm.size);

	/* Check if we got the highmem region from an earlier boot step */

	if (ps3_mm_get_repository_highmem(&map.r1)) {
		result = ps3_mm_region_create(&map.r1, map.total - map.rm.size);

		if (!result)
			ps3_mm_set_repository_highmem(&map.r1);
	}

	/* correct map.total for the real total amount of memory we use */
	map.total = map.rm.size + map.r1.size;

	if (!map.r1.size) {
		DBG("%s:%d: No highmem region found\n", __func__, __LINE__);
	} else {
		DBG("%s:%d: Adding highmem region: %llxh %llxh\n",
			__func__, __LINE__, map.rm.size,
			map.total - map.rm.size);
		memblock_add(map.rm.size, map.total - map.rm.size);
	}

	DBG(" <- %s:%d\n", __func__, __LINE__);
}
1244
1245
1246
1247
1248
1249
1250
/**
 * ps3_mm_shutdown - final cleanup of the memory map
 *
 * Releases the highmem region back to the hypervisor.
 */
notrace void ps3_mm_shutdown(void)
{
	ps3_mm_region_destroy(&map.r1);
}