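/*
 * MMU (SGX page-table) handling for the GMA500/Poulsbo DRM driver.
 */
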
#include <linux/highmem.h>

#include "mmu.h"
#include "psb_drv.h"
#include "psb_reg.h"

/*
 * Code for the SGX MMU:
 */

/*
 * clflush on one processor only:
 * clflush should apparently flush the cache line on all processors in an
 * SMP system.
 */

/*
 * kmap atomic:
 * Usage of the kmap_atomic slots must be completely encapsulated within the
 * driver spinlock, and no other functions that may be using the lock for
 * other purposes may be called from within the locked region. Since the
 * slots are per processor, this guarantees that we are the only user.
 */

static inline uint32_t psb_mmu_pt_index(uint32_t offset)
{
	return (offset >> PAGE_SHIFT) & 0x3FF;
}

static inline uint32_t psb_mmu_pd_index(uint32_t offset)
{
	return offset >> PSB_PDE_SHIFT;
}

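/*
 * Worked example (assuming the usual two-level layout with 4 KiB pages and
 * PSB_PDE_SHIFT == 22): the GPU virtual address 0x00c03000 decodes to PD
 * index 0x00c03000 >> 22 = 3 and PT index (0x00c03000 >> 12) & 0x3FF = 3,
 * i.e. entry 3 of page table 3.
 */
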
static inline void psb_clflush(void *addr)
{
	__asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory");
}

static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
{
	if (!driver->has_clflush)
		return;

	mb();
	psb_clflush(addr);
	mb();
}

static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver, int force)
{
	struct drm_device *dev = driver->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);

	if (atomic_read(&driver->needs_tlbflush) || force) {
		uint32_t val = PSB_RSGX32(PSB_CR_BIF_CTRL);
		PSB_WSGX32(val | _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);

		/* Make sure data cache is turned off before enabling it */
		wmb();
		PSB_WSGX32(val & ~_PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
		(void)PSB_RSGX32(PSB_CR_BIF_CTRL);
		if (driver->msvdx_mmu_invaldc)
			atomic_set(driver->msvdx_mmu_invaldc, 1);
	}
	atomic_set(&driver->needs_tlbflush, 0);
}

#if 0
static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force)
{
	down_write(&driver->sem);
	psb_mmu_flush_pd_locked(driver, force);
	up_write(&driver->sem);
}
#endif

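/*
 * Flush the SGX TLBs: do a full invalidate when page-directory entries have
 * changed since the last flush, otherwise only a data-cache flush.
 */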
void psb_mmu_flush(struct psb_mmu_driver *driver)
{
	struct drm_device *dev = driver->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	uint32_t val;

	down_write(&driver->sem);
	val = PSB_RSGX32(PSB_CR_BIF_CTRL);
	if (atomic_read(&driver->needs_tlbflush))
		PSB_WSGX32(val | _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
	else
		PSB_WSGX32(val | _PSB_CB_CTRL_FLUSH, PSB_CR_BIF_CTRL);

	/* Make sure data cache is turned off and MMU is flushed before
	   restoring bank interface control register */
	wmb();
	PSB_WSGX32(val & ~(_PSB_CB_CTRL_FLUSH | _PSB_CB_CTRL_INVALDC),
		   PSB_CR_BIF_CTRL);
	(void)PSB_RSGX32(PSB_CR_BIF_CTRL);

	atomic_set(&driver->needs_tlbflush, 0);
	if (driver->msvdx_mmu_invaldc)
		atomic_set(driver->msvdx_mmu_invaldc, 1);
	up_write(&driver->sem);
}

void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
{
	struct drm_device *dev = pd->driver->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	uint32_t offset = (hw_context == 0) ? PSB_CR_BIF_DIR_LIST_BASE0 :
			  PSB_CR_BIF_DIR_LIST_BASE1 + hw_context * 4;

	down_write(&pd->driver->sem);
	PSB_WSGX32(page_to_pfn(pd->p) << PAGE_SHIFT, offset);
	wmb();
	psb_mmu_flush_pd_locked(pd->driver, 1);
	pd->hw_context = hw_context;
	up_write(&pd->driver->sem);
}

static inline unsigned long psb_pd_addr_end(unsigned long addr,
					    unsigned long end)
{
	addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK;
	return (addr < end) ? addr : end;
}

static inline uint32_t psb_mmu_mask_pte(uint32_t pfn, int type)
{
	uint32_t mask = PSB_PTE_VALID;

	if (type & PSB_MMU_CACHED_MEMORY)
		mask |= PSB_PTE_CACHED;
	if (type & PSB_MMU_RO_MEMORY)
		mask |= PSB_PTE_RO;
	if (type & PSB_MMU_WO_MEMORY)
		mask |= PSB_PTE_WO;

	return (pfn << PAGE_SHIFT) | mask;
}

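/*
 * Example: a cached, read/write mapping of pfn 0x1234 yields the PTE
 * psb_mmu_mask_pte(0x1234, PSB_MMU_CACHED_MEMORY)
 *	== (0x1234 << PAGE_SHIFT) | PSB_PTE_CACHED | PSB_PTE_VALID.
 */
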
struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
				    int trap_pagefaults, int invalid_type)
{
	struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL);
	uint32_t *v;
	int i;

	if (!pd)
		return NULL;

	pd->p = alloc_page(GFP_DMA32);
	if (!pd->p)
		goto out_err1;
	pd->dummy_pt = alloc_page(GFP_DMA32);
	if (!pd->dummy_pt)
		goto out_err2;
	pd->dummy_page = alloc_page(GFP_DMA32);
	if (!pd->dummy_page)
		goto out_err3;

	if (!trap_pagefaults) {
		pd->invalid_pde = psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
						   invalid_type);
		pd->invalid_pte = psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
						   invalid_type);
	} else {
		pd->invalid_pde = 0;
		pd->invalid_pte = 0;
	}

	v = kmap_local_page(pd->dummy_pt);
	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
		v[i] = pd->invalid_pte;

	kunmap_local(v);

	v = kmap_local_page(pd->p);
	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
		v[i] = pd->invalid_pde;

	kunmap_local(v);

	v = kmap_local_page(pd->dummy_page);
	clear_page(v);
	kunmap_local(v);

	pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024);
	if (!pd->tables)
		goto out_err4;

	pd->hw_context = -1;
	pd->pd_mask = PSB_PTE_VALID;
	pd->driver = driver;

	return pd;

out_err4:
	__free_page(pd->dummy_page);
out_err3:
	__free_page(pd->dummy_pt);
out_err2:
	__free_page(pd->p);
out_err1:
	kfree(pd);
	return NULL;
}

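/*
 * Usage sketch (the argument values are illustrative): allocate a page
 * directory and bind it to hardware context 0 so the SGX starts walking it:
 *
 *	struct psb_mmu_pd *pd = psb_mmu_alloc_pd(driver, 1, 0);
 *
 *	if (pd)
 *		psb_mmu_set_pd_context(pd, 0);
 */
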
static void psb_mmu_free_pt(struct psb_mmu_pt *pt)
{
	__free_page(pt->p);
	kfree(pt);
}

void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
{
	struct psb_mmu_driver *driver = pd->driver;
	struct drm_device *dev = driver->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct psb_mmu_pt *pt;
	int i;

	down_write(&driver->sem);
	if (pd->hw_context != -1) {
		PSB_WSGX32(0, PSB_CR_BIF_DIR_LIST_BASE0 + pd->hw_context * 4);
		psb_mmu_flush_pd_locked(driver, 1);
	}

	/* Should take the spinlock here, but we don't need to do that
	   since we have the semaphore in write mode. */

	for (i = 0; i < 1024; ++i) {
		pt = pd->tables[i];
		if (pt)
			psb_mmu_free_pt(pt);
	}

	vfree(pd->tables);
	__free_page(pd->dummy_page);
	__free_page(pd->dummy_pt);
	__free_page(pd->p);
	kfree(pd);
	up_write(&driver->sem);
}

static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
{
	struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL);
	void *v;
	uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT;
	uint32_t clflush_count = PAGE_SIZE / clflush_add;
	spinlock_t *lock = &pd->driver->lock;
	uint8_t *clf;
	uint32_t *ptes;
	int i;

	if (!pt)
		return NULL;

	pt->p = alloc_page(GFP_DMA32);
	if (!pt->p) {
		kfree(pt);
		return NULL;
	}

	spin_lock(lock);

	v = kmap_atomic(pt->p);
	clf = (uint8_t *) v;
	ptes = (uint32_t *) v;
	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
		*ptes++ = pd->invalid_pte;

	if (pd->driver->has_clflush && pd->hw_context != -1) {
		mb();
		for (i = 0; i < clflush_count; ++i) {
			psb_clflush(clf);
			clf += clflush_add;
		}
		mb();
	}
	kunmap_atomic(v);
	spin_unlock(lock);

	pt->count = 0;
	pt->pd = pd;
	pt->index = 0;

	return pt;
}

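/*
 * Return a locked, kmapped page table for @addr, allocating one on demand.
 * Allocation has to drop driver->lock, so after retaking it we recheck
 * whether another thread installed a table at this index in the meantime.
 */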
static struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
						    unsigned long addr)
{
	uint32_t index = psb_mmu_pd_index(addr);
	struct psb_mmu_pt *pt;
	uint32_t *v;
	spinlock_t *lock = &pd->driver->lock;

	spin_lock(lock);
	pt = pd->tables[index];
	while (!pt) {
		spin_unlock(lock);
		pt = psb_mmu_alloc_pt(pd);
		if (!pt)
			return NULL;
		spin_lock(lock);

		if (pd->tables[index]) {
			spin_unlock(lock);
			psb_mmu_free_pt(pt);
			spin_lock(lock);
			pt = pd->tables[index];
			continue;
		}

		v = kmap_atomic(pd->p);
		pd->tables[index] = pt;
		v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
		pt->index = index;

		if (pd->hw_context != -1) {
			/* Flush while the atomic mapping is still live. */
			psb_mmu_clflush(pd->driver, (void *)&v[index]);
			atomic_set(&pd->driver->needs_tlbflush, 1);
		}
		kunmap_atomic((void *) v);
	}
	pt->v = kmap_atomic(pt->p);
	return pt;
}

static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
					      unsigned long addr)
{
	uint32_t index = psb_mmu_pd_index(addr);
	struct psb_mmu_pt *pt;
	spinlock_t *lock = &pd->driver->lock;

	spin_lock(lock);
	pt = pd->tables[index];
	if (!pt) {
		spin_unlock(lock);
		return NULL;
	}
	pt->v = kmap_atomic(pt->p);
	return pt;
}

static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
{
	struct psb_mmu_pd *pd = pt->pd;
	uint32_t *v;

	kunmap_atomic(pt->v);
	if (pt->count == 0) {
		v = kmap_atomic(pd->p);
		v[pt->index] = pd->invalid_pde;
		pd->tables[pt->index] = NULL;

		if (pd->hw_context != -1) {
			psb_mmu_clflush(pd->driver, (void *)&v[pt->index]);
			atomic_set(&pd->driver->needs_tlbflush, 1);
		}
		kunmap_atomic(v);
		spin_unlock(&pd->driver->lock);
		psb_mmu_free_pt(pt);
		return;
	}
	spin_unlock(&pd->driver->lock);
}

static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt, unsigned long addr,
				   uint32_t pte)
{
	pt->v[psb_mmu_pt_index(addr)] = pte;
}

static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt,
					  unsigned long addr)
{
	pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
}

struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
{
	struct psb_mmu_pd *pd;

	down_read(&driver->sem);
	pd = driver->default_pd;
	up_read(&driver->sem);

	return pd;
}

void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
{
	struct drm_device *dev = driver->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);

	PSB_WSGX32(driver->bif_ctrl, PSB_CR_BIF_CTRL);
	psb_mmu_free_pagedir(driver->default_pd);
	kfree(driver);
}

struct psb_mmu_driver *psb_mmu_driver_init(struct drm_device *dev,
					   int trap_pagefaults,
					   int invalid_type,
					   atomic_t *msvdx_mmu_invaldc)
{
	struct psb_mmu_driver *driver;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);

	driver = kmalloc(sizeof(*driver), GFP_KERNEL);
	if (!driver)
		return NULL;

	driver->dev = dev;
	driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults,
					      invalid_type);
	if (!driver->default_pd)
		goto out_err1;

	spin_lock_init(&driver->lock);
	init_rwsem(&driver->sem);
	down_write(&driver->sem);
	atomic_set(&driver->needs_tlbflush, 1);
	driver->msvdx_mmu_invaldc = msvdx_mmu_invaldc;

	driver->bif_ctrl = PSB_RSGX32(PSB_CR_BIF_CTRL);
	PSB_WSGX32(driver->bif_ctrl | _PSB_CB_CTRL_CLEAR_FAULT,
		   PSB_CR_BIF_CTRL);
	PSB_WSGX32(driver->bif_ctrl & ~_PSB_CB_CTRL_CLEAR_FAULT,
		   PSB_CR_BIF_CTRL);

	driver->has_clflush = 0;

	if (boot_cpu_has(X86_FEATURE_CLFLUSH)) {
		uint32_t tfms, misc, cap0, cap4, clflush_size;

		/*
		 * clflush size is determined at kernel setup for x86_64 but
		 * not for i386. We have to do it here.
		 */

		cpuid(0x00000001, &tfms, &misc, &cap0, &cap4);
		clflush_size = ((misc >> 8) & 0xff) * 8;
		driver->has_clflush = 1;
		driver->clflush_add =
			PAGE_SIZE * clflush_size / sizeof(uint32_t);
		driver->clflush_mask = driver->clflush_add - 1;
		driver->clflush_mask = ~driver->clflush_mask;
	}

	up_write(&driver->sem);
	return driver;

out_err1:
	kfree(driver);
	return NULL;
}

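/*
 * Bring-up sketch (the argument values mirror a typical caller and are
 * illustrative, not mandated by this file):
 *
 *	mmu = psb_mmu_driver_init(dev, 1, 0, NULL);
 *	if (!mmu)
 *		return -ENOMEM;
 *	psb_mmu_set_pd_context(psb_mmu_get_default_pd(mmu), 0);
 */

/*
 * Flush the CPU cache lines covering the PTEs of @num_pages pages starting
 * at @address, one tile row at a time. No-op on CPUs without clflush.
 */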
static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
			       uint32_t num_pages, uint32_t desired_tile_stride,
			       uint32_t hw_tile_stride)
{
	struct psb_mmu_pt *pt;
	uint32_t rows = 1;
	uint32_t i;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long add;
	unsigned long row_add;
	unsigned long clflush_add = pd->driver->clflush_add;
	unsigned long clflush_mask = pd->driver->clflush_mask;

	if (!pd->driver->has_clflush)
		return;

	if (hw_tile_stride)
		rows = num_pages / desired_tile_stride;
	else
		desired_tile_stride = num_pages;

	add = desired_tile_stride << PAGE_SHIFT;
	row_add = hw_tile_stride << PAGE_SHIFT;
	mb();
	for (i = 0; i < rows; ++i) {

		addr = address;
		end = addr + add;

		do {
			next = psb_pd_addr_end(addr, end);
			pt = psb_mmu_pt_map_lock(pd, addr);
			if (!pt)
				continue;
			do {
				psb_clflush(&pt->v[psb_mmu_pt_index(addr)]);
			} while (addr += clflush_add,
				 (addr & clflush_mask) < next);

			psb_mmu_pt_unmap_unlock(pt);
		} while (addr = next, next != end);
		address += row_add;
	}
	mb();
}

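/*
 * Unmap @num_pages pages starting at @address and drop the per-table use
 * counts; page tables that reach a count of zero are freed by
 * psb_mmu_pt_unmap_unlock().
 */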
void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
				 unsigned long address, uint32_t num_pages)
{
	struct psb_mmu_pt *pt;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long f_address = address;

	down_read(&pd->driver->sem);

	addr = address;
	end = addr + (num_pages << PAGE_SHIFT);

	do {
		next = psb_pd_addr_end(addr, end);
		pt = psb_mmu_pt_alloc_map_lock(pd, addr);
		if (!pt)
			goto out;
		do {
			psb_mmu_invalidate_pte(pt, addr);
			--pt->count;
		} while (addr += PAGE_SIZE, addr < next);
		psb_mmu_pt_unmap_unlock(pt);

	} while (addr = next, next != end);

out:
	if (pd->hw_context != -1)
		psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);

	up_read(&pd->driver->sem);

	if (pd->hw_context != -1)
		psb_mmu_flush(pd->driver);
}

void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
			  uint32_t num_pages, uint32_t desired_tile_stride,
			  uint32_t hw_tile_stride)
{
	struct psb_mmu_pt *pt;
	uint32_t rows = 1;
	uint32_t i;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long add;
	unsigned long row_add;
	unsigned long f_address = address;

	if (hw_tile_stride)
		rows = num_pages / desired_tile_stride;
	else
		desired_tile_stride = num_pages;

	add = desired_tile_stride << PAGE_SHIFT;
	row_add = hw_tile_stride << PAGE_SHIFT;

	down_read(&pd->driver->sem);

	/* Make sure we only need to flush this processor's cache */

	for (i = 0; i < rows; ++i) {

		addr = address;
		end = addr + add;

		do {
			next = psb_pd_addr_end(addr, end);
			pt = psb_mmu_pt_map_lock(pd, addr);
			if (!pt)
				continue;
			do {
				psb_mmu_invalidate_pte(pt, addr);
				--pt->count;
			} while (addr += PAGE_SIZE, addr < next);
			psb_mmu_pt_unmap_unlock(pt);

		} while (addr = next, next != end);
		address += row_add;
	}
	if (pd->hw_context != -1)
		psb_mmu_flush_ptes(pd, f_address, num_pages,
				   desired_tile_stride, hw_tile_stride);

	up_read(&pd->driver->sem);

	if (pd->hw_context != -1)
		psb_mmu_flush(pd->driver);
}

int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
				unsigned long address, uint32_t num_pages,
				int type)
{
	struct psb_mmu_pt *pt;
	uint32_t pte;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long f_address = address;
	int ret = -ENOMEM;

	down_read(&pd->driver->sem);

	addr = address;
	end = addr + (num_pages << PAGE_SHIFT);

	do {
		next = psb_pd_addr_end(addr, end);
		pt = psb_mmu_pt_alloc_map_lock(pd, addr);
		if (!pt) {
			ret = -ENOMEM;
			goto out;
		}
		do {
			pte = psb_mmu_mask_pte(start_pfn++, type);
			psb_mmu_set_pte(pt, addr, pte);
			pt->count++;
		} while (addr += PAGE_SIZE, addr < next);
		psb_mmu_pt_unmap_unlock(pt);

	} while (addr = next, next != end);
	ret = 0;

out:
	if (pd->hw_context != -1)
		psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);

	up_read(&pd->driver->sem);

	if (pd->hw_context != -1)
		psb_mmu_flush(pd->driver);

	return ret;
}

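/*
 * Example (sketch, names illustrative): map num_pages physically contiguous
 * pages, starting at pfn, into the GPU address space at gpu_addr as an
 * uncached read/write range:
 *
 *	ret = psb_mmu_insert_pfn_sequence(pd, pfn, gpu_addr, num_pages, 0);
 */
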
int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
			 unsigned long address, uint32_t num_pages,
			 uint32_t desired_tile_stride, uint32_t hw_tile_stride,
			 int type)
{
	struct psb_mmu_pt *pt;
	uint32_t rows = 1;
	uint32_t i;
	uint32_t pte;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long add;
	unsigned long row_add;
	unsigned long f_address = address;
	int ret = -ENOMEM;

	if (hw_tile_stride) {
		if (num_pages % desired_tile_stride != 0)
			return -EINVAL;
		rows = num_pages / desired_tile_stride;
	} else {
		desired_tile_stride = num_pages;
	}

	add = desired_tile_stride << PAGE_SHIFT;
	row_add = hw_tile_stride << PAGE_SHIFT;

	down_read(&pd->driver->sem);

	for (i = 0; i < rows; ++i) {

		addr = address;
		end = addr + add;

		do {
			next = psb_pd_addr_end(addr, end);
			pt = psb_mmu_pt_alloc_map_lock(pd, addr);
			if (!pt)
				goto out;
			do {
				pte = psb_mmu_mask_pte(page_to_pfn(*pages++),
						       type);
				psb_mmu_set_pte(pt, addr, pte);
				pt->count++;
			} while (addr += PAGE_SIZE, addr < next);
			psb_mmu_pt_unmap_unlock(pt);

		} while (addr = next, next != end);

		address += row_add;
	}

	ret = 0;
out:
	if (pd->hw_context != -1)
		psb_mmu_flush_ptes(pd, f_address, num_pages,
				   desired_tile_stride, hw_tile_stride);

	up_read(&pd->driver->sem);

	if (pd->hw_context != -1)
		psb_mmu_flush(pd->driver);

	return ret;
}

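/*
 * Example (sketch, names illustrative): map a tiled surface four pages wide
 * and two rows tall; num_pages must be a multiple of desired_tile_stride
 * whenever hw_tile_stride is non-zero:
 *
 *	ret = psb_mmu_insert_pages(pd, pages, gpu_addr, 8, 4, 4, 0);
 */

/*
 * Look up the pfn backing GPU virtual address @virtual. When no page table
 * exists, the lookup can still succeed if the directory holds the shared
 * invalid PDE and the invalid PTE is itself valid, in which case the range
 * resolves to the dummy page.
 */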
int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
			   unsigned long *pfn)
{
	int ret;
	struct psb_mmu_pt *pt;
	uint32_t tmp;
	spinlock_t *lock = &pd->driver->lock;

	down_read(&pd->driver->sem);
	pt = psb_mmu_pt_map_lock(pd, virtual);
	if (!pt) {
		uint32_t *v;

		spin_lock(lock);
		v = kmap_atomic(pd->p);
		tmp = v[psb_mmu_pd_index(virtual)];
		kunmap_atomic(v);
		spin_unlock(lock);

		if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
		    !(pd->invalid_pte & PSB_PTE_VALID)) {
			ret = -EINVAL;
			goto out;
		}
		ret = 0;
		*pfn = pd->invalid_pte >> PAGE_SHIFT;
		goto out;
	}
	tmp = pt->v[psb_mmu_pt_index(virtual)];
	if (!(tmp & PSB_PTE_VALID)) {
		ret = -EINVAL;
	} else {
		ret = 0;
		*pfn = tmp >> PAGE_SHIFT;
	}
	psb_mmu_pt_unmap_unlock(pt);
out:
	up_read(&pd->driver->sem);
	return ret;
}