0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031 #include "qman_priv.h"
0032
/* Set to the first portal brought up (see init_pcfg); exported default portal */
struct qman_portal *qman_dma_portal;
EXPORT_SYMBOL(qman_dma_portal);

/* Enable portal interrupt sources (as opposed to pure polling mode) */
#define CONFIG_FSL_DPA_PIRQ_SLOW 1
#define CONFIG_FSL_DPA_PIRQ_FAST 1

static struct cpumask portal_cpus;	/* cpus that have been assigned a portal */
static int __qman_portals_probed;	/* 0 = in progress, 1 = done, -1 = failed */

/* serialises portal/cpu assignment and the qman_dma_portal setup */
static DEFINE_SPINLOCK(qman_lock);
0044
/*
 * Point the portal's stash destination (SDEST) at @cpu.
 *
 * With CONFIG_FSL_PAMU, an IOMMU domain is also allocated, configured for
 * L1 stashing on @cpu and attached to the portal device.  Note the
 * asymmetric error handling: if domain *allocation* fails we fall through
 * and still program the SDEST (just without a PAMU domain), but if stash
 * configuration or device attach fails, the domain is freed and the
 * function returns WITHOUT programming the SDEST.
 */
static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu)
{
#ifdef CONFIG_FSL_PAMU
	struct device *dev = pcfg->dev;
	int ret;

	pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type);
	if (!pcfg->iommu_domain) {
		dev_err(dev, "%s(): iommu_domain_alloc() failed", __func__);
		goto no_iommu;
	}
	ret = fsl_pamu_configure_l1_stash(pcfg->iommu_domain, cpu);
	if (ret < 0) {
		dev_err(dev, "%s(): fsl_pamu_configure_l1_stash() = %d",
			__func__, ret);
		goto out_domain_free;
	}
	ret = iommu_attach_device(pcfg->iommu_domain, dev);
	if (ret < 0) {
		dev_err(dev, "%s(): iommu_device_attach() = %d", __func__,
			ret);
		goto out_domain_free;
	}

no_iommu:
#endif
	qman_set_sdest(pcfg->channel, cpu);

	return;

#ifdef CONFIG_FSL_PAMU
	/* error path: tear the domain down again; SDEST is left unprogrammed */
out_domain_free:
	iommu_domain_free(pcfg->iommu_domain);
	pcfg->iommu_domain = NULL;
#endif
}
0081
0082 static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg)
0083 {
0084 struct qman_portal *p;
0085 u32 irq_sources = 0;
0086
0087
0088 qman_liodn_fixup(pcfg->channel);
0089
0090 pcfg->iommu_domain = NULL;
0091 portal_set_cpu(pcfg, pcfg->cpu);
0092
0093 p = qman_create_affine_portal(pcfg, NULL);
0094 if (!p) {
0095 dev_crit(pcfg->dev, "%s: Portal failure on cpu %d\n",
0096 __func__, pcfg->cpu);
0097 return NULL;
0098 }
0099
0100
0101 #ifdef CONFIG_FSL_DPA_PIRQ_SLOW
0102 irq_sources |= QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI |
0103 QM_PIRQ_CSCI;
0104 #endif
0105 #ifdef CONFIG_FSL_DPA_PIRQ_FAST
0106 irq_sources |= QM_PIRQ_DQRI;
0107 #endif
0108 qman_p_irqsource_add(p, irq_sources);
0109
0110 spin_lock(&qman_lock);
0111 if (cpumask_equal(&portal_cpus, cpu_possible_mask)) {
0112
0113 qman_init_cgr_all();
0114 }
0115
0116 if (!qman_dma_portal)
0117 qman_dma_portal = p;
0118
0119 spin_unlock(&qman_lock);
0120
0121 dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu);
0122
0123 return p;
0124 }
0125
0126 static void qman_portal_update_sdest(const struct qm_portal_config *pcfg,
0127 unsigned int cpu)
0128 {
0129 #ifdef CONFIG_FSL_PAMU
0130 if (pcfg->iommu_domain) {
0131 if (fsl_pamu_configure_l1_stash(pcfg->iommu_domain, cpu) < 0) {
0132 dev_err(pcfg->dev,
0133 "Failed to update pamu stash setting\n");
0134 return;
0135 }
0136 }
0137 #endif
0138 qman_set_sdest(pcfg->channel, cpu);
0139 }
0140
0141 static int qman_offline_cpu(unsigned int cpu)
0142 {
0143 struct qman_portal *p;
0144 const struct qm_portal_config *pcfg;
0145
0146 p = affine_portals[cpu];
0147 if (p) {
0148 pcfg = qman_get_qm_portal_config(p);
0149 if (pcfg) {
0150
0151 cpu = cpumask_any_but(cpu_online_mask, cpu);
0152 irq_set_affinity(pcfg->irq, cpumask_of(cpu));
0153 qman_portal_update_sdest(pcfg, cpu);
0154 }
0155 }
0156 return 0;
0157 }
0158
0159 static int qman_online_cpu(unsigned int cpu)
0160 {
0161 struct qman_portal *p;
0162 const struct qm_portal_config *pcfg;
0163
0164 p = affine_portals[cpu];
0165 if (p) {
0166 pcfg = qman_get_qm_portal_config(p);
0167 if (pcfg) {
0168 irq_set_affinity(pcfg->irq, cpumask_of(cpu));
0169 qman_portal_update_sdest(pcfg, cpu);
0170 }
0171 }
0172 return 0;
0173 }
0174
/*
 * Report portal probing status to other drivers:
 *   0  - probing still in progress
 *   1  - all portals probed
 *  -1  - a portal probe failed
 */
int qman_portals_probed(void)
{
	return __qman_portals_probed;
}
EXPORT_SYMBOL_GPL(qman_portals_probed);
0180
/*
 * Probe one "fsl,qman-portal" device: map its cache-enabled (CE) and
 * cache-inhibited (CI) register regions, assign it to the first cpu that
 * does not yet have a portal, and initialise it.  Once every possible cpu
 * has a portal (at which point further portals are left unassigned), any
 * required cleanup of stale frame queues is performed.
 */
static int qman_portal_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	struct qm_portal_config *pcfg;
	struct resource *addr_phys[2];
	int irq, cpu, err, i;
	u32 val;

	/* The main QMan device must have probed first. */
	err = qman_is_probed();
	if (!err)
		return -EPROBE_DEFER;
	if (err < 0) {
		dev_err(&pdev->dev, "failing probe due to qman probe error\n");
		return -ENODEV;
	}

	pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
	if (!pcfg) {
		__qman_portals_probed = -1;
		return -ENOMEM;
	}

	pcfg->dev = dev;

	addr_phys[0] = platform_get_resource(pdev, IORESOURCE_MEM,
					     DPAA_PORTAL_CE);
	if (!addr_phys[0]) {
		dev_err(dev, "Can't get %pOF property 'reg::CE'\n", node);
		goto err_ioremap1;
	}

	addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM,
					     DPAA_PORTAL_CI);
	if (!addr_phys[1]) {
		dev_err(dev, "Can't get %pOF property 'reg::CI'\n", node);
		goto err_ioremap1;
	}

	err = of_property_read_u32(node, "cell-index", &val);
	if (err) {
		dev_err(dev, "Can't get %pOF property 'cell-index'\n", node);
		__qman_portals_probed = -1;
		return err;
	}
	pcfg->channel = val;
	pcfg->cpu = -1;		/* no cpu assigned yet */
	irq = platform_get_irq(pdev, 0);
	if (irq <= 0)
		goto err_ioremap1;
	pcfg->irq = irq;

	pcfg->addr_virt_ce = memremap(addr_phys[0]->start,
				      resource_size(addr_phys[0]),
				      QBMAN_MEMREMAP_ATTR);
	if (!pcfg->addr_virt_ce) {
		dev_err(dev, "memremap::CE failed\n");
		goto err_ioremap1;
	}

	pcfg->addr_virt_ci = ioremap(addr_phys[1]->start,
				     resource_size(addr_phys[1]));
	if (!pcfg->addr_virt_ci) {
		dev_err(dev, "ioremap::CI failed\n");
		goto err_ioremap2;
	}

	pcfg->pools = qm_get_pools_sdqcr();

	spin_lock(&qman_lock);
	cpu = cpumask_first_zero(&portal_cpus);
	if (cpu >= nr_cpu_ids) {
		__qman_portals_probed = 1;
		/* unassigned portal, skip init */
		spin_unlock(&qman_lock);
		goto check_cleanup;
	}

	cpumask_set_cpu(cpu, &portal_cpus);
	spin_unlock(&qman_lock);
	pcfg->cpu = cpu;

	if (dma_set_mask(dev, DMA_BIT_MASK(40))) {
		dev_err(dev, "dma_set_mask() failed\n");
		goto err_portal_init;
	}

	if (!init_pcfg(pcfg)) {
		dev_err(dev, "portal init failed\n");
		goto err_portal_init;
	}

	/* clear irq affinity if assigned cpu is offline */
	if (!cpu_online(cpu))
		qman_offline_cpu(cpu);

check_cleanup:
	if (__qman_portals_probed == 1 && qman_requires_cleanup()) {
		/*
		 * QMan was not reset before this kernel started (e.g. after
		 * a kexec); shut down every frame queue so they are back in
		 * a clean state.
		 */
		for (i = 0; i < qm_get_fqid_maxcnt(); i++) {
			err = qman_shutdown_fq(i);
			if (err) {
				dev_err(dev, "Failed to shutdown frame queue %d\n",
					i);
				goto err_portal_init;
			}
		}
		qman_done_cleanup();
	}

	return 0;

	/* Unwind in reverse order of setup and mark probing as failed. */
err_portal_init:
	iounmap(pcfg->addr_virt_ci);
err_ioremap2:
	memunmap(pcfg->addr_virt_ce);
err_ioremap1:
	__qman_portals_probed = -1;

	return -ENXIO;
}
0305
/* Device-tree match table: one portal device per "fsl,qman-portal" node */
static const struct of_device_id qman_portal_ids[] = {
	{
		.compatible = "fsl,qman-portal",
	},
	{}	/* sentinel */
};
MODULE_DEVICE_TABLE(of, qman_portal_ids);

static struct platform_driver qman_portal_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = qman_portal_ids,
	},
	.probe = qman_portal_probe,
};
0321
0322 static int __init qman_portal_driver_register(struct platform_driver *drv)
0323 {
0324 int ret;
0325
0326 ret = platform_driver_register(drv);
0327 if (ret < 0)
0328 return ret;
0329
0330 ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
0331 "soc/qman_portal:online",
0332 qman_online_cpu, qman_offline_cpu);
0333 if (ret < 0) {
0334 pr_err("qman: failed to register hotplug callbacks.\n");
0335 platform_driver_unregister(drv);
0336 return ret;
0337 }
0338 return 0;
0339 }
0340
/*
 * Register through the custom init helper above so the cpu-hotplug
 * callbacks are installed together with the platform driver.
 */
module_driver(qman_portal_driver,
	      qman_portal_driver_register, platform_driver_unregister);