Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  *
0004  * Copyright (C) 2013 Freescale Semiconductor, Inc.
0005  */
0006 
0007 #define pr_fmt(fmt)    "fsl-pamu: %s: " fmt, __func__
0008 
0009 #include "fsl_pamu.h"
0010 
0011 #include <linux/fsl/guts.h>
0012 #include <linux/interrupt.h>
0013 #include <linux/genalloc.h>
0014 #include <linux/of_address.h>
0015 #include <linux/of_irq.h>
0016 #include <linux/platform_device.h>
0017 
0018 #include <asm/mpc85xx.h>
0019 
0020 /* define indexes for each operation mapping scenario */
0021 #define OMI_QMAN        0x00
0022 #define OMI_FMAN        0x01
0023 #define OMI_QMAN_PRIV   0x02
0024 #define OMI_CAAM        0x03
0025 
0026 #define make64(high, low) (((u64)(high) << 32) | (low))
0027 
/* Per-probe context handed to the access-violation ISR (pamu_av_isr) */
struct pamu_isr_data {
	void __iomem *pamu_reg_base;	/* Base address of PAMU regs */
	unsigned int count;		/* The number of PAMUs */
};
0032 
/* Primary PAACE table: one entry per LIODN, shared by all PAMU instances */
static struct paace *ppaact;
/* Secondary PAACE table (subwindow entries) */
static struct paace *spaact;

static bool probed;			/* Has PAMU been probed? */

/*
 * Table for matching compatible strings, for device tree
 * guts node, for QorIQ SOCs.
 * "fsl,qoriq-device-config-2.0" corresponds to T4 & B4
 * SOCs. For the older SOCs "fsl,qoriq-device-config-1.0"
 * string would be used.
 */
static const struct of_device_id guts_device_ids[] = {
	{ .compatible = "fsl,qoriq-device-config-1.0", },
	{ .compatible = "fsl,qoriq-device-config-2.0", },
	{}
};

/*
 * Table for matching compatible strings, for device tree
 * L3 cache controller node.
 * "fsl,t4240-l3-cache-controller" corresponds to T4,
 * "fsl,b4860-l3-cache-controller" corresponds to B4 &
 * "fsl,p4080-l3-cache-controller" corresponds to other,
 * SOCs.
 */
static const struct of_device_id l3_device_ids[] = {
	{ .compatible = "fsl,t4240-l3-cache-controller", },
	{ .compatible = "fsl,b4860-l3-cache-controller", },
	{ .compatible = "fsl,p4080-l3-cache-controller", },
	{}
};

/* maximum subwindows permitted per liodn (read from PAMU_PC3 at probe time) */
static u32 max_subwindow_count;
0068 
0069 /**
0070  * pamu_get_ppaace() - Return the primary PACCE
0071  * @liodn: liodn PAACT index for desired PAACE
0072  *
0073  * Returns the ppace pointer upon success else return
0074  * null.
0075  */
0076 static struct paace *pamu_get_ppaace(int liodn)
0077 {
0078     if (!ppaact || liodn >= PAACE_NUMBER_ENTRIES) {
0079         pr_debug("PPAACT doesn't exist\n");
0080         return NULL;
0081     }
0082 
0083     return &ppaact[liodn];
0084 }
0085 
/**
 * pamu_enable_liodn() - Set valid bit of PAACE
 * @liodn: liodn PAACT index for desired PAACE
 *
 * Returns 0 upon success else error code < 0 returned
 */
int pamu_enable_liodn(int liodn)
{
	struct paace *ppaace;

	ppaace = pamu_get_ppaace(liodn);
	if (!ppaace) {
		pr_debug("Invalid primary paace entry\n");
		return -ENOENT;
	}

	/* A zero window-size encoding means this entry was never configured */
	if (!get_bf(ppaace->addr_bitfields, PPAACE_AF_WSE)) {
		pr_debug("liodn %d not configured\n", liodn);
		return -EINVAL;
	}

	/* Ensure that all other stores to the ppaace complete first */
	mb();

	/* Hardware may start using the entry as soon as the valid bit lands */
	set_bf(ppaace->addr_bitfields, PAACE_AF_V, PAACE_V_VALID);
	mb();

	return 0;
}
0115 
/**
 * pamu_disable_liodn() - Clears valid bit of PAACE
 * @liodn: liodn PAACT index for desired PAACE
 *
 * Returns 0 upon success else error code < 0 returned
 */
int pamu_disable_liodn(int liodn)
{
	struct paace *ppaace;

	ppaace = pamu_get_ppaace(liodn);
	if (!ppaace) {
		pr_debug("Invalid primary paace entry\n");
		return -ENOENT;
	}

	/* Clear the valid bit, then make sure the store is visible to PAMU */
	set_bf(ppaace->addr_bitfields, PAACE_AF_V, PAACE_V_INVALID);
	mb();

	return 0;
}
0137 
/* Derive the window size encoding for a particular PAACE entry */
static unsigned int map_addrspace_size_to_wse(phys_addr_t addrspace_size)
{
	/* Bug if not a power of 2 */
	BUG_ON(addrspace_size & (addrspace_size - 1));

	/*
	 * window size is 2^(WSE+1) bytes, so for a power-of-2 size
	 * WSE = log2(size) - 1 = fls64(size) - 2
	 */
	return fls64(addrspace_size) - 2;
}
0147 
/*
 * Set the PAACE type as primary and set the coherency required domain
 * attribute
 */
static void pamu_init_ppaace(struct paace *ppaace)
{
	set_bf(ppaace->addr_bitfields, PAACE_AF_PT, PAACE_PT_PRIMARY);

	/* Default to coherent (snooped) accesses; QMAN/BMAN override this */
	set_bf(ppaace->domain_attr.to_host.coherency_required, PAACE_DA_HOST_CR,
	       PAACE_M_COHERENCE_REQ);
}
0159 
/*
 * Function used for updating stash destination for the corresponding
 * LIODN.
 */
int pamu_update_paace_stash(int liodn, u32 value)
{
	struct paace *paace;

	paace = pamu_get_ppaace(liodn);
	if (!paace) {
		pr_debug("Invalid liodn entry\n");
		return -ENOENT;
	}
	/* CID field selects the cache stash destination for this LIODN */
	set_bf(paace->impl_attr, PAACE_IA_CID, value);

	mb();

	return 0;
}
0179 
0180 /**
0181  * pamu_config_paace() - Sets up PPAACE entry for specified liodn
0182  *
0183  * @liodn: Logical IO device number
0184  * @omi: Operation mapping index -- if ~omi == 0 then omi not defined
0185  * @stashid: cache stash id for associated cpu -- if ~stashid == 0 then
0186  *       stashid not defined
0187  * @prot: window permissions
0188  *
0189  * Returns 0 upon success else error code < 0 returned
0190  */
0191 int pamu_config_ppaace(int liodn, u32 omi, u32 stashid, int prot)
0192 {
0193     struct paace *ppaace;
0194 
0195     ppaace = pamu_get_ppaace(liodn);
0196     if (!ppaace)
0197         return -ENOENT;
0198 
0199     /* window size is 2^(WSE+1) bytes */
0200     set_bf(ppaace->addr_bitfields, PPAACE_AF_WSE,
0201            map_addrspace_size_to_wse(1ULL << 36));
0202 
0203     pamu_init_ppaace(ppaace);
0204 
0205     ppaace->wbah = 0;
0206     set_bf(ppaace->addr_bitfields, PPAACE_AF_WBAL, 0);
0207 
0208     /* set up operation mapping if it's configured */
0209     if (omi < OME_NUMBER_ENTRIES) {
0210         set_bf(ppaace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED);
0211         ppaace->op_encode.index_ot.omi = omi;
0212     } else if (~omi != 0) {
0213         pr_debug("bad operation mapping index: %d\n", omi);
0214         return -EINVAL;
0215     }
0216 
0217     /* configure stash id */
0218     if (~stashid != 0)
0219         set_bf(ppaace->impl_attr, PAACE_IA_CID, stashid);
0220 
0221     set_bf(ppaace->impl_attr, PAACE_IA_ATM, PAACE_ATM_WINDOW_XLATE);
0222     ppaace->twbah = 0;
0223     set_bf(ppaace->win_bitfields, PAACE_WIN_TWBAL, 0);
0224     set_bf(ppaace->addr_bitfields, PAACE_AF_AP, prot);
0225     set_bf(ppaace->impl_attr, PAACE_IA_WCE, 0);
0226     set_bf(ppaace->addr_bitfields, PPAACE_AF_MW, 0);
0227     mb();
0228 
0229     return 0;
0230 }
0231 
0232 /**
0233  * get_ome_index() - Returns the index in the operation mapping table
0234  *                   for device.
0235  * @*omi_index: pointer for storing the index value
0236  *
0237  */
0238 void get_ome_index(u32 *omi_index, struct device *dev)
0239 {
0240     if (of_device_is_compatible(dev->of_node, "fsl,qman-portal"))
0241         *omi_index = OMI_QMAN;
0242     if (of_device_is_compatible(dev->of_node, "fsl,qman"))
0243         *omi_index = OMI_QMAN_PRIV;
0244 }
0245 
0246 /**
0247  * get_stash_id - Returns stash destination id corresponding to a
0248  *                cache type and vcpu.
0249  * @stash_dest_hint: L1, L2 or L3
0250  * @vcpu: vpcu target for a particular cache type.
0251  *
0252  * Returs stash on success or ~(u32)0 on failure.
0253  *
0254  */
0255 u32 get_stash_id(u32 stash_dest_hint, u32 vcpu)
0256 {
0257     const u32 *prop;
0258     struct device_node *node;
0259     u32 cache_level;
0260     int len, found = 0;
0261     int i;
0262 
0263     /* Fastpath, exit early if L3/CPC cache is target for stashing */
0264     if (stash_dest_hint == PAMU_ATTR_CACHE_L3) {
0265         node = of_find_matching_node(NULL, l3_device_ids);
0266         if (node) {
0267             prop = of_get_property(node, "cache-stash-id", NULL);
0268             if (!prop) {
0269                 pr_debug("missing cache-stash-id at %pOF\n",
0270                      node);
0271                 of_node_put(node);
0272                 return ~(u32)0;
0273             }
0274             of_node_put(node);
0275             return be32_to_cpup(prop);
0276         }
0277         return ~(u32)0;
0278     }
0279 
0280     for_each_of_cpu_node(node) {
0281         prop = of_get_property(node, "reg", &len);
0282         for (i = 0; i < len / sizeof(u32); i++) {
0283             if (be32_to_cpup(&prop[i]) == vcpu) {
0284                 found = 1;
0285                 goto found_cpu_node;
0286             }
0287         }
0288     }
0289 found_cpu_node:
0290 
0291     /* find the hwnode that represents the cache */
0292     for (cache_level = PAMU_ATTR_CACHE_L1; (cache_level < PAMU_ATTR_CACHE_L3) && found; cache_level++) {
0293         if (stash_dest_hint == cache_level) {
0294             prop = of_get_property(node, "cache-stash-id", NULL);
0295             if (!prop) {
0296                 pr_debug("missing cache-stash-id at %pOF\n",
0297                      node);
0298                 of_node_put(node);
0299                 return ~(u32)0;
0300             }
0301             of_node_put(node);
0302             return be32_to_cpup(prop);
0303         }
0304 
0305         prop = of_get_property(node, "next-level-cache", NULL);
0306         if (!prop) {
0307             pr_debug("can't find next-level-cache at %pOF\n", node);
0308             of_node_put(node);
0309             return ~(u32)0;  /* can't traverse any further */
0310         }
0311         of_node_put(node);
0312 
0313         /* advance to next node in cache hierarchy */
0314         node = of_find_node_by_phandle(*prop);
0315         if (!node) {
0316             pr_debug("Invalid node for cache hierarchy\n");
0317             return ~(u32)0;
0318         }
0319     }
0320 
0321     pr_debug("stash dest not found for %d on vcpu %d\n",
0322          stash_dest_hint, vcpu);
0323     return ~(u32)0;
0324 }
0325 
/* Identify if the PAACT table entry belongs to QMAN, BMAN or QMAN Portal */
#define QMAN_PAACE 1
#define QMAN_PORTAL_PAACE 2
#define BMAN_PAACE 3

/*
 * Setup operation mapping and stash destinations for QMAN and QMAN portal.
 * Memory accesses to QMAN and BMAN private memory need not be coherent, so
 * clear the PAACE entry coherency attribute for them.
 */
static void setup_qbman_paace(struct paace *ppaace, int paace_type)
{
	switch (paace_type) {
	case QMAN_PAACE:
		set_bf(ppaace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED);
		ppaace->op_encode.index_ot.omi = OMI_QMAN_PRIV;
		/* setup QMAN Private data stashing for the L3 cache */
		set_bf(ppaace->impl_attr, PAACE_IA_CID, get_stash_id(PAMU_ATTR_CACHE_L3, 0));
		/* private-memory accesses need not snoop CPU caches */
		set_bf(ppaace->domain_attr.to_host.coherency_required, PAACE_DA_HOST_CR,
		       0);
		break;
	case QMAN_PORTAL_PAACE:
		set_bf(ppaace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED);
		ppaace->op_encode.index_ot.omi = OMI_QMAN;
		/* Set DQRR and Frame stashing for the L3 cache */
		set_bf(ppaace->impl_attr, PAACE_IA_CID, get_stash_id(PAMU_ATTR_CACHE_L3, 0));
		break;
	case BMAN_PAACE:
		/* private-memory accesses need not snoop CPU caches */
		set_bf(ppaace->domain_attr.to_host.coherency_required, PAACE_DA_HOST_CR,
		       0);
		break;
	}
}
0359 
/*
 * Setup the operation mapping table for various devices. This is a static
 * table where each table index corresponds to a particular device. PAMU uses
 * this table to translate device transaction to appropriate corenet
 * transaction.
 */
static void setup_omt(struct ome *omt)
{
	struct ome *ome;

	/* Configure OMI_QMAN */
	ome = &omt[OMI_QMAN];

	ome->moe[IOE_READ_IDX] = EOE_VALID | EOE_READ;
	ome->moe[IOE_EREAD0_IDX] = EOE_VALID | EOE_RSA;
	ome->moe[IOE_WRITE_IDX] = EOE_VALID | EOE_WRITE;
	ome->moe[IOE_EWRITE0_IDX] = EOE_VALID | EOE_WWSAO;

	ome->moe[IOE_DIRECT0_IDX] = EOE_VALID | EOE_LDEC;
	ome->moe[IOE_DIRECT1_IDX] = EOE_VALID | EOE_LDECPE;

	/* Configure OMI_FMAN */
	ome = &omt[OMI_FMAN];
	ome->moe[IOE_READ_IDX]  = EOE_VALID | EOE_READI;
	ome->moe[IOE_WRITE_IDX] = EOE_VALID | EOE_WRITE;

	/* Configure OMI_QMAN private */
	ome = &omt[OMI_QMAN_PRIV];
	ome->moe[IOE_READ_IDX]  = EOE_VALID | EOE_READ;
	ome->moe[IOE_WRITE_IDX] = EOE_VALID | EOE_WRITE;
	ome->moe[IOE_EREAD0_IDX] = EOE_VALID | EOE_RSA;
	ome->moe[IOE_EWRITE0_IDX] = EOE_VALID | EOE_WWSA;

	/* Configure OMI_CAAM */
	ome = &omt[OMI_CAAM];
	ome->moe[IOE_READ_IDX]  = EOE_VALID | EOE_READI;
	ome->moe[IOE_WRITE_IDX] = EOE_VALID | EOE_WRITE;
}
0398 
/*
 * Get the maximum number of PAACT table entries
 * and subwindows supported by PAMU
 */
static void get_pamu_cap_values(unsigned long pamu_reg_base)
{
	u32 pc_val;

	/* PAMU_PC3 is a capability register; MWCE encodes subwindow count */
	pc_val = in_be32((u32 *)(pamu_reg_base + PAMU_PC3));
	/* Maximum number of subwindows per liodn */
	max_subwindow_count = 1 << (1 + PAMU_PC3_MWCE(pc_val));
}
0411 
/* Setup PAMU registers pointing to PAACT, SPAACT and OMT */
static int setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size,
			  phys_addr_t ppaact_phys, phys_addr_t spaact_phys,
			  phys_addr_t omt_phys)
{
	u32 *pc;
	struct pamu_mmap_regs *pamu_regs;

	/* NOTE(review): pamu_reg_size is unused in this body — confirm intent */
	pc = (u32 *) (pamu_reg_base + PAMU_PC);
	pamu_regs = (struct pamu_mmap_regs *)
		(pamu_reg_base + PAMU_MMAP_REGS_BASE);

	/* set up pointers to corenet control blocks */

	/* PPAACT base and limit (base + PAACT_SIZE) */
	out_be32(&pamu_regs->ppbah, upper_32_bits(ppaact_phys));
	out_be32(&pamu_regs->ppbal, lower_32_bits(ppaact_phys));
	ppaact_phys = ppaact_phys + PAACT_SIZE;
	out_be32(&pamu_regs->pplah, upper_32_bits(ppaact_phys));
	out_be32(&pamu_regs->pplal, lower_32_bits(ppaact_phys));

	/* SPAACT base and limit (base + SPAACT_SIZE) */
	out_be32(&pamu_regs->spbah, upper_32_bits(spaact_phys));
	out_be32(&pamu_regs->spbal, lower_32_bits(spaact_phys));
	spaact_phys = spaact_phys + SPAACT_SIZE;
	out_be32(&pamu_regs->splah, upper_32_bits(spaact_phys));
	out_be32(&pamu_regs->splal, lower_32_bits(spaact_phys));

	/* OMT base and limit (base + OMT_SIZE) */
	out_be32(&pamu_regs->obah, upper_32_bits(omt_phys));
	out_be32(&pamu_regs->obal, lower_32_bits(omt_phys));
	omt_phys = omt_phys + OMT_SIZE;
	out_be32(&pamu_regs->olah, upper_32_bits(omt_phys));
	out_be32(&pamu_regs->olal, lower_32_bits(omt_phys));

	/*
	 * set PAMU enable bit,
	 * allow ppaact & omt to be cached
	 * & enable PAMU access violation interrupts.
	 */

	out_be32((u32 *)(pamu_reg_base + PAMU_PICS),
		 PAMU_ACCESS_VIOLATION_ENABLE);
	out_be32(pc, PAMU_PC_PE | PAMU_PC_OCE | PAMU_PC_SPCC | PAMU_PC_PPCC);
	return 0;
}
0455 
/* Enable all device LIODNS */
static void setup_liodns(void)
{
	int i, len;
	struct paace *ppaace;
	struct device_node *node = NULL;
	const u32 *prop;

	/* Every node carrying "fsl,liodn" owns one or more LIODNs to enable */
	for_each_node_with_property(node, "fsl,liodn") {
		prop = of_get_property(node, "fsl,liodn", &len);
		for (i = 0; i < len / sizeof(u32); i++) {
			int liodn;

			liodn = be32_to_cpup(&prop[i]);
			if (liodn >= PAACE_NUMBER_ENTRIES) {
				pr_debug("Invalid LIODN value %d\n", liodn);
				continue;
			}
			ppaace = pamu_get_ppaace(liodn);
			pamu_init_ppaace(ppaace);
			/* window size is 2^(WSE+1) bytes; 35 => full 2^36 space */
			set_bf(ppaace->addr_bitfields, PPAACE_AF_WSE, 35);
			ppaace->wbah = 0;
			set_bf(ppaace->addr_bitfields, PPAACE_AF_WBAL, 0);
			/* identity mapping with full access permissions */
			set_bf(ppaace->impl_attr, PAACE_IA_ATM,
			       PAACE_ATM_NO_XLATE);
			set_bf(ppaace->addr_bitfields, PAACE_AF_AP,
			       PAACE_AP_PERMS_ALL);
			/* QMAN/BMAN nodes get special stash/coherency setup */
			if (of_device_is_compatible(node, "fsl,qman-portal"))
				setup_qbman_paace(ppaace, QMAN_PORTAL_PAACE);
			if (of_device_is_compatible(node, "fsl,qman"))
				setup_qbman_paace(ppaace, QMAN_PAACE);
			if (of_device_is_compatible(node, "fsl,bman"))
				setup_qbman_paace(ppaace, BMAN_PAACE);
			/* entry must be fully written before it is enabled */
			mb();
			pamu_enable_liodn(liodn);
		}
	}
}
0495 
/*
 * Access-violation interrupt handler: for each PAMU instance that reports a
 * violation, dump the capture registers, then either disable the offending
 * LIODN or — per erratum A-003638 — stop violation reporting for that PAMU.
 */
static irqreturn_t pamu_av_isr(int irq, void *arg)
{
	struct pamu_isr_data *data = arg;
	phys_addr_t phys;
	unsigned int i, j, ret;

	pr_emerg("access violation interrupt\n");

	/* All PAMU instances share one IRQ; check each one's status */
	for (i = 0; i < data->count; i++) {
		void __iomem *p = data->pamu_reg_base + i * PAMU_OFFSET;
		u32 pics = in_be32(p + PAMU_PICS);

		if (pics & PAMU_ACCESS_VIOLATION_STAT) {
			u32 avs1 = in_be32(p + PAMU_AVS1);
			struct paace *paace;

			/* Dump the violation capture registers for diagnosis */
			pr_emerg("POES1=%08x\n", in_be32(p + PAMU_POES1));
			pr_emerg("POES2=%08x\n", in_be32(p + PAMU_POES2));
			pr_emerg("AVS1=%08x\n", avs1);
			pr_emerg("AVS2=%08x\n", in_be32(p + PAMU_AVS2));
			pr_emerg("AVA=%016llx\n",
				 make64(in_be32(p + PAMU_AVAH),
					in_be32(p + PAMU_AVAL)));
			pr_emerg("UDAD=%08x\n", in_be32(p + PAMU_UDAD));
			pr_emerg("POEA=%016llx\n",
				 make64(in_be32(p + PAMU_POEAH),
					in_be32(p + PAMU_POEAL)));

			phys = make64(in_be32(p + PAMU_POEAH),
				      in_be32(p + PAMU_POEAL));

			/* Assume that POEA points to a PAACE */
			if (phys) {
				u32 *paace = phys_to_virt(phys);

				/* Only the first four words are relevant */
				for (j = 0; j < 4; j++)
					pr_emerg("PAACE[%u]=%08x\n",
						 j, in_be32(paace + j));
			}

			/* clear access violation condition */
			out_be32(p + PAMU_AVS1, avs1 & PAMU_AV_MASK);
			paace = pamu_get_ppaace(avs1 >> PAMU_AVS1_LIODN_SHIFT);
			BUG_ON(!paace);
			/* check if we got a violation for a disabled LIODN */
			if (!get_bf(paace->addr_bitfields, PAACE_AF_V)) {
				/*
				 * As per hardware erratum A-003638, access
				 * violation can be reported for a disabled
				 * LIODN. If we hit that condition, disable
				 * access violation reporting.
				 */
				pics &= ~PAMU_ACCESS_VIOLATION_ENABLE;
			} else {
				/* Disable the LIODN */
				ret = pamu_disable_liodn(avs1 >> PAMU_AVS1_LIODN_SHIFT);
				BUG_ON(ret);
				pr_emerg("Disabling liodn %x\n",
					 avs1 >> PAMU_AVS1_LIODN_SHIFT);
			}
			/* write back PICS to ack (and possibly mask) the IRQ */
			out_be32((p + PAMU_PICS), pics);
		}
	}

	return IRQ_HANDLED;
}
0563 
/* Local Access Window (LAW) attribute-register fields */
#define LAWAR_EN		0x80000000	/* window enable */
#define LAWAR_TARGET_MASK	0x0FF00000	/* target interface */
#define LAWAR_TARGET_SHIFT	20
#define LAWAR_SIZE_MASK		0x0000003F	/* encoded window size */
#define LAWAR_CSDID_MASK	0x000FF000	/* coherence subdomain id */
#define LAWAR_CSDID_SHIFT	12

#define LAW_SIZE_4K		0xb	/* size encoding for a 4 KiB window */

/* Memory-mapped layout of one LAW register set */
struct ccsr_law {
	u32 lawbarh;	/* LAWn base address high */
	u32 lawbarl;	/* LAWn base address low */
	u32 lawar;	/* LAWn attributes */
	u32 reserved;
};
0579 
0580 /*
0581  * Create a coherence subdomain for a given memory block.
0582  */
0583 static int create_csd(phys_addr_t phys, size_t size, u32 csd_port_id)
0584 {
0585     struct device_node *np;
0586     const __be32 *iprop;
0587     void __iomem *lac = NULL;   /* Local Access Control registers */
0588     struct ccsr_law __iomem *law;
0589     void __iomem *ccm = NULL;
0590     u32 __iomem *csdids;
0591     unsigned int i, num_laws, num_csds;
0592     u32 law_target = 0;
0593     u32 csd_id = 0;
0594     int ret = 0;
0595 
0596     np = of_find_compatible_node(NULL, NULL, "fsl,corenet-law");
0597     if (!np)
0598         return -ENODEV;
0599 
0600     iprop = of_get_property(np, "fsl,num-laws", NULL);
0601     if (!iprop) {
0602         ret = -ENODEV;
0603         goto error;
0604     }
0605 
0606     num_laws = be32_to_cpup(iprop);
0607     if (!num_laws) {
0608         ret = -ENODEV;
0609         goto error;
0610     }
0611 
0612     lac = of_iomap(np, 0);
0613     if (!lac) {
0614         ret = -ENODEV;
0615         goto error;
0616     }
0617 
0618     /* LAW registers are at offset 0xC00 */
0619     law = lac + 0xC00;
0620 
0621     of_node_put(np);
0622 
0623     np = of_find_compatible_node(NULL, NULL, "fsl,corenet-cf");
0624     if (!np) {
0625         ret = -ENODEV;
0626         goto error;
0627     }
0628 
0629     iprop = of_get_property(np, "fsl,ccf-num-csdids", NULL);
0630     if (!iprop) {
0631         ret = -ENODEV;
0632         goto error;
0633     }
0634 
0635     num_csds = be32_to_cpup(iprop);
0636     if (!num_csds) {
0637         ret = -ENODEV;
0638         goto error;
0639     }
0640 
0641     ccm = of_iomap(np, 0);
0642     if (!ccm) {
0643         ret = -ENOMEM;
0644         goto error;
0645     }
0646 
0647     /* The undocumented CSDID registers are at offset 0x600 */
0648     csdids = ccm + 0x600;
0649 
0650     of_node_put(np);
0651     np = NULL;
0652 
0653     /* Find an unused coherence subdomain ID */
0654     for (csd_id = 0; csd_id < num_csds; csd_id++) {
0655         if (!csdids[csd_id])
0656             break;
0657     }
0658 
0659     /* Store the Port ID in the (undocumented) proper CIDMRxx register */
0660     csdids[csd_id] = csd_port_id;
0661 
0662     /* Find the DDR LAW that maps to our buffer. */
0663     for (i = 0; i < num_laws; i++) {
0664         if (law[i].lawar & LAWAR_EN) {
0665             phys_addr_t law_start, law_end;
0666 
0667             law_start = make64(law[i].lawbarh, law[i].lawbarl);
0668             law_end = law_start +
0669                 (2ULL << (law[i].lawar & LAWAR_SIZE_MASK));
0670 
0671             if (law_start <= phys && phys < law_end) {
0672                 law_target = law[i].lawar & LAWAR_TARGET_MASK;
0673                 break;
0674             }
0675         }
0676     }
0677 
0678     if (i == 0 || i == num_laws) {
0679         /* This should never happen */
0680         ret = -ENOENT;
0681         goto error;
0682     }
0683 
0684     /* Find a free LAW entry */
0685     while (law[--i].lawar & LAWAR_EN) {
0686         if (i == 0) {
0687             /* No higher priority LAW slots available */
0688             ret = -ENOENT;
0689             goto error;
0690         }
0691     }
0692 
0693     law[i].lawbarh = upper_32_bits(phys);
0694     law[i].lawbarl = lower_32_bits(phys);
0695     wmb();
0696     law[i].lawar = LAWAR_EN | law_target | (csd_id << LAWAR_CSDID_SHIFT) |
0697         (LAW_SIZE_4K + get_order(size));
0698     wmb();
0699 
0700 error:
0701     if (ccm)
0702         iounmap(ccm);
0703 
0704     if (lac)
0705         iounmap(lac);
0706 
0707     if (np)
0708         of_node_put(np);
0709 
0710     return ret;
0711 }
0712 
/*
 * Table of SVRs and the corresponding PORT_ID values. Port ID corresponds to a
 * bit map of snoopers for a given range of memory mapped by a LAW.
 *
 * All future CoreNet-enabled SOCs will have this erratum(A-004510) fixed, so this
 * table should never need to be updated.  SVRs are guaranteed to be unique, so
 * there is no worry that a future SOC will inadvertently have one of these
 * values.
 */
static const struct {
	u32 svr;	/* System Version Register value (SVR << 8 | rev) */
	u32 port_id;	/* snooper bitmap for the coherence subdomain */
} port_id_map[] = {
	{(SVR_P2040 << 8) | 0x10, 0xFF000000},	/* P2040 1.0 */
	{(SVR_P2040 << 8) | 0x11, 0xFF000000},	/* P2040 1.1 */
	{(SVR_P2041 << 8) | 0x10, 0xFF000000},	/* P2041 1.0 */
	{(SVR_P2041 << 8) | 0x11, 0xFF000000},	/* P2041 1.1 */
	{(SVR_P3041 << 8) | 0x10, 0xFF000000},	/* P3041 1.0 */
	{(SVR_P3041 << 8) | 0x11, 0xFF000000},	/* P3041 1.1 */
	{(SVR_P4040 << 8) | 0x20, 0xFFF80000},	/* P4040 2.0 */
	{(SVR_P4080 << 8) | 0x20, 0xFFF80000},	/* P4080 2.0 */
	{(SVR_P5010 << 8) | 0x10, 0xFC000000},	/* P5010 1.0 */
	{(SVR_P5010 << 8) | 0x20, 0xFC000000},	/* P5010 2.0 */
	{(SVR_P5020 << 8) | 0x10, 0xFC000000},	/* P5020 1.0 */
	{(SVR_P5021 << 8) | 0x10, 0xFF800000},	/* P5021 1.0 */
	{(SVR_P5040 << 8) | 0x10, 0xFF800000},	/* P5040 1.0 */
};

#define SVR_SECURITY	0x80000	/* The Security (E) bit; masked off for matching */
0742 
0743 static int fsl_pamu_probe(struct platform_device *pdev)
0744 {
0745     struct device *dev = &pdev->dev;
0746     void __iomem *pamu_regs = NULL;
0747     struct ccsr_guts __iomem *guts_regs = NULL;
0748     u32 pamubypenr, pamu_counter;
0749     unsigned long pamu_reg_off;
0750     unsigned long pamu_reg_base;
0751     struct pamu_isr_data *data = NULL;
0752     struct device_node *guts_node;
0753     u64 size;
0754     struct page *p;
0755     int ret = 0;
0756     int irq;
0757     phys_addr_t ppaact_phys;
0758     phys_addr_t spaact_phys;
0759     struct ome *omt;
0760     phys_addr_t omt_phys;
0761     size_t mem_size = 0;
0762     unsigned int order = 0;
0763     u32 csd_port_id = 0;
0764     unsigned i;
0765     /*
0766      * enumerate all PAMUs and allocate and setup PAMU tables
0767      * for each of them,
0768      * NOTE : All PAMUs share the same LIODN tables.
0769      */
0770 
0771     if (WARN_ON(probed))
0772         return -EBUSY;
0773 
0774     pamu_regs = of_iomap(dev->of_node, 0);
0775     if (!pamu_regs) {
0776         dev_err(dev, "ioremap of PAMU node failed\n");
0777         return -ENOMEM;
0778     }
0779     of_get_address(dev->of_node, 0, &size, NULL);
0780 
0781     irq = irq_of_parse_and_map(dev->of_node, 0);
0782     if (irq == NO_IRQ) {
0783         dev_warn(dev, "no interrupts listed in PAMU node\n");
0784         goto error;
0785     }
0786 
0787     data = kzalloc(sizeof(*data), GFP_KERNEL);
0788     if (!data) {
0789         ret = -ENOMEM;
0790         goto error;
0791     }
0792     data->pamu_reg_base = pamu_regs;
0793     data->count = size / PAMU_OFFSET;
0794 
0795     /* The ISR needs access to the regs, so we won't iounmap them */
0796     ret = request_irq(irq, pamu_av_isr, 0, "pamu", data);
0797     if (ret < 0) {
0798         dev_err(dev, "error %i installing ISR for irq %i\n", ret, irq);
0799         goto error;
0800     }
0801 
0802     guts_node = of_find_matching_node(NULL, guts_device_ids);
0803     if (!guts_node) {
0804         dev_err(dev, "could not find GUTS node %pOF\n", dev->of_node);
0805         ret = -ENODEV;
0806         goto error;
0807     }
0808 
0809     guts_regs = of_iomap(guts_node, 0);
0810     of_node_put(guts_node);
0811     if (!guts_regs) {
0812         dev_err(dev, "ioremap of GUTS node failed\n");
0813         ret = -ENODEV;
0814         goto error;
0815     }
0816 
0817     /* read in the PAMU capability registers */
0818     get_pamu_cap_values((unsigned long)pamu_regs);
0819     /*
0820      * To simplify the allocation of a coherency domain, we allocate the
0821      * PAACT and the OMT in the same memory buffer.  Unfortunately, this
0822      * wastes more memory compared to allocating the buffers separately.
0823      */
0824     /* Determine how much memory we need */
0825     mem_size = (PAGE_SIZE << get_order(PAACT_SIZE)) +
0826         (PAGE_SIZE << get_order(SPAACT_SIZE)) +
0827         (PAGE_SIZE << get_order(OMT_SIZE));
0828     order = get_order(mem_size);
0829 
0830     p = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
0831     if (!p) {
0832         dev_err(dev, "unable to allocate PAACT/SPAACT/OMT block\n");
0833         ret = -ENOMEM;
0834         goto error;
0835     }
0836 
0837     ppaact = page_address(p);
0838     ppaact_phys = page_to_phys(p);
0839 
0840     /* Make sure the memory is naturally aligned */
0841     if (ppaact_phys & ((PAGE_SIZE << order) - 1)) {
0842         dev_err(dev, "PAACT/OMT block is unaligned\n");
0843         ret = -ENOMEM;
0844         goto error;
0845     }
0846 
0847     spaact = (void *)ppaact + (PAGE_SIZE << get_order(PAACT_SIZE));
0848     omt = (void *)spaact + (PAGE_SIZE << get_order(SPAACT_SIZE));
0849 
0850     dev_dbg(dev, "ppaact virt=%p phys=%pa\n", ppaact, &ppaact_phys);
0851 
0852     /* Check to see if we need to implement the work-around on this SOC */
0853 
0854     /* Determine the Port ID for our coherence subdomain */
0855     for (i = 0; i < ARRAY_SIZE(port_id_map); i++) {
0856         if (port_id_map[i].svr == (mfspr(SPRN_SVR) & ~SVR_SECURITY)) {
0857             csd_port_id = port_id_map[i].port_id;
0858             dev_dbg(dev, "found matching SVR %08x\n",
0859                 port_id_map[i].svr);
0860             break;
0861         }
0862     }
0863 
0864     if (csd_port_id) {
0865         dev_dbg(dev, "creating coherency subdomain at address %pa, size %zu, port id 0x%08x",
0866             &ppaact_phys, mem_size, csd_port_id);
0867 
0868         ret = create_csd(ppaact_phys, mem_size, csd_port_id);
0869         if (ret) {
0870             dev_err(dev, "could not create coherence subdomain\n");
0871             return ret;
0872         }
0873     }
0874 
0875     spaact_phys = virt_to_phys(spaact);
0876     omt_phys = virt_to_phys(omt);
0877 
0878     pamubypenr = in_be32(&guts_regs->pamubypenr);
0879 
0880     for (pamu_reg_off = 0, pamu_counter = 0x80000000; pamu_reg_off < size;
0881          pamu_reg_off += PAMU_OFFSET, pamu_counter >>= 1) {
0882 
0883         pamu_reg_base = (unsigned long)pamu_regs + pamu_reg_off;
0884         setup_one_pamu(pamu_reg_base, pamu_reg_off, ppaact_phys,
0885                    spaact_phys, omt_phys);
0886         /* Disable PAMU bypass for this PAMU */
0887         pamubypenr &= ~pamu_counter;
0888     }
0889 
0890     setup_omt(omt);
0891 
0892     /* Enable all relevant PAMU(s) */
0893     out_be32(&guts_regs->pamubypenr, pamubypenr);
0894 
0895     iounmap(guts_regs);
0896 
0897     /* Enable DMA for the LIODNs in the device tree */
0898 
0899     setup_liodns();
0900 
0901     probed = true;
0902 
0903     return 0;
0904 
0905 error:
0906     if (irq != NO_IRQ)
0907         free_irq(irq, data);
0908 
0909     kfree_sensitive(data);
0910 
0911     if (pamu_regs)
0912         iounmap(pamu_regs);
0913 
0914     if (guts_regs)
0915         iounmap(guts_regs);
0916 
0917     if (ppaact)
0918         free_pages((unsigned long)ppaact, order);
0919 
0920     ppaact = NULL;
0921 
0922     return ret;
0923 }
0924 
/* Platform driver; the device is created manually in fsl_pamu_init() */
static struct platform_driver fsl_of_pamu_driver = {
	.driver = {
		.name = "fsl-of-pamu",
	},
	.probe = fsl_pamu_probe,
};
0931 
static __init int fsl_pamu_init(void)
{
	struct platform_device *pdev = NULL;
	struct device_node *np;
	int ret;

	/*
	 * The normal OF process calls the probe function at some
	 * indeterminate later time, after most drivers have loaded.  This is
	 * too late for us, because PAMU clients (like the Qman driver)
	 * depend on PAMU being initialized early.
	 *
	 * So instead, we "manually" call our probe function by creating the
	 * platform devices ourselves.
	 */

	/*
	 * We assume that there is only one PAMU node in the device tree.  A
	 * single PAMU node represents all of the PAMU devices in the SOC
	 * already.   Everything else already makes that assumption, and the
	 * binding for the PAMU nodes doesn't allow for any parent-child
	 * relationships anyway.  In other words, support for more than one
	 * PAMU node would require significant changes to a lot of code.
	 */

	np = of_find_compatible_node(NULL, NULL, "fsl,pamu");
	if (!np) {
		pr_err("could not find a PAMU node\n");
		return -ENODEV;
	}

	ret = platform_driver_register(&fsl_of_pamu_driver);
	if (ret) {
		pr_err("could not register driver (err=%i)\n", ret);
		goto error_driver_register;
	}

	pdev = platform_device_alloc("fsl-of-pamu", 0);
	if (!pdev) {
		pr_err("could not allocate device %pOF\n", np);
		ret = -ENOMEM;
		goto error_device_alloc;
	}
	/* hand the DT node to the device so probe can find regs/irq */
	pdev->dev.of_node = of_node_get(np);

	/* set up the IOMMU-domain layer before the probe runs */
	ret = pamu_domain_init();
	if (ret)
		goto error_device_add;

	/* triggers fsl_pamu_probe() synchronously */
	ret = platform_device_add(pdev);
	if (ret) {
		pr_err("could not add device %pOF (err=%i)\n", np, ret);
		goto error_device_add;
	}

	return 0;

/* unwind in strict reverse order of the setup above */
error_device_add:
	of_node_put(pdev->dev.of_node);
	pdev->dev.of_node = NULL;

	platform_device_put(pdev);

error_device_alloc:
	platform_driver_unregister(&fsl_of_pamu_driver);

error_driver_register:
	of_node_put(np);

	return ret;
}
arch_initcall(fsl_pamu_init);