// SPDX-License-Identifier: GPL-2.0-only
/*
 * GTT (Graphics Translation Table) handling for the GMA500/Poulsbo family
 * of integrated graphics devices.
 */

#include <linux/io.h>

#include "gem.h" /* TODO: for struct psb_gem_object, see psb_gtt_restore() */
#include "psb_drv.h"
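
/*
 * GTT resource allocator - manage page mappings in GTT space
 *
 * psb_gtt_allocate_resource() carves a block out of the GTT address space:
 * allocations backed by stolen memory come from the start of the aperture,
 * everything else from the range behind the stolen area. On success, the
 * offset of the block from the start of the GTT is returned through @offset.
 *
 * A typical caller looks something like this (sketch only; error handling
 * and the surrounding object code are omitted):
 *
 *	struct resource res = {};
 *	u32 offset;
 *
 *	ret = psb_gtt_allocate_resource(pdev, &res, "fb", size, PAGE_SIZE,
 *					stolen, &offset);
 *	if (!ret)
 *		psb_gtt_insert_pages(pdev, &res, pages);
 */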
int psb_gtt_allocate_resource(struct drm_psb_private *pdev, struct resource *res,
			      const char *name, resource_size_t size, resource_size_t align,
			      bool stolen, u32 *offset)
{
	struct resource *root = pdev->gtt_mem;
	resource_size_t start, end;
	int ret;

	if (stolen) {
		/* The start of the GTT is backed by stolen pages. */
		start = root->start;
		end = root->start + pdev->gtt.stolen_size - 1;
	} else {
		/* The rest is backed by system pages. */
		start = root->start + pdev->gtt.stolen_size;
		end = root->end;
	}

	res->name = name;
	ret = allocate_resource(root, res, size, start, end, align, NULL, NULL);
	if (ret)
		return ret;
	*offset = res->start - root->start;

	return 0;
}
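
/*
 *	psb_gtt_mask_pte	-	generate GTT pte entry
 *	@pfn: page number to encode
 *	@type: type of memory in the GTT
 *
 *	Set the GTT entry for the appropriate memory type.
 */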
uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
{
	uint32_t mask = PSB_PTE_VALID;

	/* Ensure we explode rather than put an invalid low mapping of
	   a high mapping page into the gtt */
	BUG_ON(pfn & ~(0xFFFFFFFF >> PAGE_SHIFT));

	if (type & PSB_MMU_CACHED_MEMORY)
		mask |= PSB_PTE_CACHED;
	if (type & PSB_MMU_RO_MEMORY)
		mask |= PSB_PTE_RO;
	if (type & PSB_MMU_WO_MEMORY)
		mask |= PSB_PTE_WO;

	return (pfn << PAGE_SHIFT) | mask;
}
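
/* Return the GTT slot (PTE pointer) for the first page of @res. */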
static u32 __iomem *psb_gtt_entry(struct drm_psb_private *pdev, const struct resource *res)
{
	unsigned long offset = res->start - pdev->gtt_mem->start;

	return pdev->gtt_map + (offset >> PAGE_SHIFT);
}
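
/*
 * psb_gtt_insert_pages - write PTEs for @pages into the GTT range of @res
 *
 * Acquires the GTT mutex internally; the caller must not hold it.
 */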
void psb_gtt_insert_pages(struct drm_psb_private *pdev, const struct resource *res,
			  struct page **pages)
{
	resource_size_t npages, i;
	u32 __iomem *gtt_slot;
	u32 pte;

	mutex_lock(&pdev->gtt_mutex);

	/* Write our page entries into the GTT itself */

	npages = resource_size(res) >> PAGE_SHIFT;
	gtt_slot = psb_gtt_entry(pdev, res);

	for (i = 0; i < npages; ++i, ++gtt_slot) {
		pte = psb_gtt_mask_pte(page_to_pfn(pages[i]), PSB_MMU_CACHED_MEMORY);
		iowrite32(pte, gtt_slot);
	}

	/* Make sure all the entries are set before we return */
	ioread32(gtt_slot - 1);

	mutex_unlock(&pdev->gtt_mutex);
}
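
/*
 * psb_gtt_remove_pages - point the GTT range of @res back at the scratch page
 *
 * Acquires the GTT mutex internally; the caller must not hold it.
 */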
void psb_gtt_remove_pages(struct drm_psb_private *pdev, const struct resource *res)
{
	resource_size_t npages, i;
	u32 __iomem *gtt_slot;
	u32 pte;

	mutex_lock(&pdev->gtt_mutex);

	/* Install scratch page for the resource */

	pte = psb_gtt_mask_pte(page_to_pfn(pdev->scratch_page), PSB_MMU_CACHED_MEMORY);

	npages = resource_size(res) >> PAGE_SHIFT;
	gtt_slot = psb_gtt_entry(pdev, res);

	for (i = 0; i < npages; ++i, ++gtt_slot)
		iowrite32(pte, gtt_slot);

	/* Make sure all the entries are set before we return */
	ioread32(gtt_slot - 1);

	mutex_unlock(&pdev->gtt_mutex);
}
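
/*
 * Enable the GMCH (graphics memory controller hub) and turn on the page
 * table in PSB_PGETBL_CTL. The previous register values are saved so that
 * psb_gtt_disable() can restore them.
 */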
static int psb_gtt_enable(struct drm_psb_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->dev;
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	int ret;

	ret = pci_read_config_word(pdev, PSB_GMCH_CTRL, &dev_priv->gmch_ctrl);
	if (ret)
		return pcibios_err_to_errno(ret);
	ret = pci_write_config_word(pdev, PSB_GMCH_CTRL, dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED);
	if (ret)
		return pcibios_err_to_errno(ret);

	dev_priv->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL);
	PSB_WVDC32(dev_priv->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);

	(void)PSB_RVDC32(PSB_PGETBL_CTL);

	return 0;
}
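
/* Restore the GMCH and page-table control registers saved by psb_gtt_enable(). */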
static void psb_gtt_disable(struct drm_psb_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->dev;
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	pci_write_config_word(pdev, PSB_GMCH_CTRL, dev_priv->gmch_ctrl);
	PSB_WVDC32(dev_priv->pge_ctl, PSB_PGETBL_CTL);

	(void)PSB_RVDC32(PSB_PGETBL_CTL);
}
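
/* Tear down the GTT mapping set up by psb_gtt_init(). */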
void psb_gtt_fini(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);

	iounmap(dev_priv->gtt_map);
	psb_gtt_disable(dev_priv);
	mutex_destroy(&dev_priv->gtt_mutex);
}
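
/* Clear the GTT by pointing every entry at the scratch page. */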
static void psb_gtt_clear(struct drm_psb_private *pdev)
{
	resource_size_t pfn_base;
	unsigned long i;
	uint32_t pte;

	pfn_base = page_to_pfn(pdev->scratch_page);
	pte = psb_gtt_mask_pte(pfn_base, PSB_MMU_CACHED_MEMORY);

	for (i = 0; i < pdev->gtt.gtt_pages; ++i)
		iowrite32(pte, pdev->gtt_map + i);

	(void)ioread32(pdev->gtt_map + i - 1);
}
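
/*
 * Work out the GTT and GATT ranges from the PCI BARs and the saved
 * PSB_PGETBL_CTL value, falling back to sane defaults when a BAR is
 * not set up (as seen on some Cedarview systems).
 */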
static void psb_gtt_init_ranges(struct drm_psb_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->dev;
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	struct psb_gtt *pg = &dev_priv->gtt;
	resource_size_t gtt_phys_start, mmu_gatt_start, gtt_start, gtt_pages,
			gatt_start, gatt_pages;
	struct resource *gtt_mem;

	/* The root resource we allocate address space from */
	gtt_phys_start = dev_priv->pge_ctl & PAGE_MASK;

	/*
	 * The video MMU has a HW bug when accessing 0x0d0000000. Make
	 * the GATT start at 0x0e0000000. This doesn't actually matter for
	 * us now, but maybe will if the video acceleration ever gets
	 * opened up.
	 */
	mmu_gatt_start = 0xe0000000;

	gtt_start = pci_resource_start(pdev, PSB_GTT_RESOURCE);
	gtt_pages = pci_resource_len(pdev, PSB_GTT_RESOURCE) >> PAGE_SHIFT;

	/* CDV doesn't report this. In which case the system has 64 gtt pages. */
	if (!gtt_start || !gtt_pages) {
		dev_dbg(dev->dev, "GTT PCI BAR not initialized.\n");
		gtt_pages = 64;
		gtt_start = dev_priv->pge_ctl;
	}

	gatt_start = pci_resource_start(pdev, PSB_GATT_RESOURCE);
	gatt_pages = pci_resource_len(pdev, PSB_GATT_RESOURCE) >> PAGE_SHIFT;

	if (!gatt_pages || !gatt_start) {
		static struct resource fudge;

		/*
		 * This can occur on CDV systems. Fudge it in this case. We
		 * really don't care what imaginary space is being allocated
		 * at this point.
		 */
		dev_dbg(dev->dev, "GATT PCI BAR not initialized.\n");
		gatt_start = 0x40000000;
		gatt_pages = (128 * 1024 * 1024) >> PAGE_SHIFT;

		fudge.start = 0x40000000;
		fudge.end = 0x40000000 + 128 * 1024 * 1024 - 1;
		fudge.name = "fudge";
		fudge.flags = IORESOURCE_MEM;

		gtt_mem = &fudge;
	} else {
		gtt_mem = &pdev->resource[PSB_GATT_RESOURCE];
	}

	pg->gtt_phys_start = gtt_phys_start;
	pg->mmu_gatt_start = mmu_gatt_start;
	pg->gtt_start = gtt_start;
	pg->gtt_pages = gtt_pages;
	pg->gatt_start = gatt_start;
	pg->gatt_pages = gatt_pages;
	dev_priv->gtt_mem = gtt_mem;
}
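
/*
 * psb_gtt_init - enable the GTT, ioremap its page table and point every
 * entry at the scratch page. psb_gtt_fini() reverses this.
 */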
int psb_gtt_init(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct psb_gtt *pg = &dev_priv->gtt;
	int ret;

	mutex_init(&dev_priv->gtt_mutex);

	ret = psb_gtt_enable(dev_priv);
	if (ret)
		goto err_mutex_destroy;

	psb_gtt_init_ranges(dev_priv);

	dev_priv->gtt_map = ioremap(pg->gtt_phys_start, pg->gtt_pages << PAGE_SHIFT);
	if (!dev_priv->gtt_map) {
		dev_err(dev->dev, "Failure to map gtt.\n");
		ret = -ENOMEM;
		goto err_psb_gtt_disable;
	}

	psb_gtt_clear(dev_priv);

	return 0;

err_psb_gtt_disable:
	psb_gtt_disable(dev_priv);
err_mutex_destroy:
	mutex_destroy(&dev_priv->gtt_mutex);
	return ret;
}
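
/*
 * psb_gtt_resume - re-enable the GTT after suspend and clear it back to the
 * scratch page. Fails if the GTT size reported by the hardware has changed.
 */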
int psb_gtt_resume(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct psb_gtt *pg = &dev_priv->gtt;
	unsigned int old_gtt_pages = pg->gtt_pages;
	int ret;

	/* Enable the GTT */
	ret = psb_gtt_enable(dev_priv);
	if (ret)
		return ret;

	psb_gtt_init_ranges(dev_priv);

	if (old_gtt_pages != pg->gtt_pages) {
		dev_err(dev->dev, "GTT resume error.\n");
		ret = -ENODEV;
		goto err_psb_gtt_disable;
	}

	psb_gtt_clear(dev_priv);

	return 0;

err_psb_gtt_disable:
	psb_gtt_disable(dev_priv);
	return ret;
}