Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-or-later
0002 /*
0003  * clk-xgene.c - AppliedMicro X-Gene Clock Interface
0004  *
0005  * Copyright (c) 2013, Applied Micro Circuits Corporation
0006  * Author: Loc Ho <lho@apm.com>
0007  */
0008 #include <linux/module.h>
0009 #include <linux/spinlock.h>
0010 #include <linux/io.h>
0011 #include <linux/of.h>
0012 #include <linux/clkdev.h>
0013 #include <linux/clk-provider.h>
0014 #include <linux/of_address.h>
0015 
/* Register SCU_PCPPLL bit fields */
#define N_DIV_RD(src)           ((src) & 0x000001ff)    /* v1 PCP feedback divider (NF) */
#define SC_N_DIV_RD(src)        ((src) & 0x0000007f)    /* v2 feedback divider (FBDIVC) */
#define SC_OUTDIV2(src)         (((src) & 0x00000100) >> 8) /* v2 output-div select bit */

/* Register SCU_SOCPLL bit fields */
#define CLKR_RD(src)            (((src) & 0x07000000)>>24)  /* reference divider, NREF - 1 */
#define CLKOD_RD(src)           (((src) & 0x00300000)>>20)  /* output divider, NOUT - 1 */
#define REGSPEC_RESET_F1_MASK       0x00010000  /* PLL held in reset when set */
#define CLKF_RD(src)            (((src) & 0x000001ff))  /* feedback divider (NFB) */

#define XGENE_CLK_DRIVER_VER        "0.1"

/* Single lock serializing CSR read-modify-write sequences for all clocks */
static DEFINE_SPINLOCK(clk_lock);
0030 
/* Relaxed (unordered) 32-bit MMIO read of a clock CSR */
static inline u32 xgene_clk_read(void __iomem *csr)
{
    return readl_relaxed(csr);
}
0035 
/* Relaxed (unordered) 32-bit MMIO write to a clock CSR */
static inline void xgene_clk_write(u32 data, void __iomem *csr)
{
    writel_relaxed(data, csr);
}
0040 
/* PLL Clock */
enum xgene_pll_type {
    PLL_TYPE_PCP = 0,   /* processor-complex PLL: v1 rate from N_DIV only */
    PLL_TYPE_SOC = 1,   /* SoC PLL: v1 rate from CLKR/CLKOD/CLKF fields */
};
0046 
struct xgene_clk_pll {
    struct clk_hw   hw;     /* common clock framework handle */
    void __iomem    *reg;   /* mapped base of the PLL CSR region */
    spinlock_t  *lock;      /* register lock (not used by the PLL ops below) */
    u32     pll_offset;     /* offset of the PLL control register from @reg */
    enum xgene_pll_type type;   /* selects the PCP vs SOC rate formula */
    int     version;        /* 1 = legacy X-Gene 1 layout, 2 = v2 layout */
};

/* Convert a clk_hw handle back to its containing xgene_clk_pll */
#define to_xgene_clk_pll(_hw) container_of(_hw, struct xgene_clk_pll, hw)
0057 
0058 static int xgene_clk_pll_is_enabled(struct clk_hw *hw)
0059 {
0060     struct xgene_clk_pll *pllclk = to_xgene_clk_pll(hw);
0061     u32 data;
0062 
0063     data = xgene_clk_read(pllclk->reg + pllclk->pll_offset);
0064     pr_debug("%s pll %s\n", clk_hw_get_name(hw),
0065         data & REGSPEC_RESET_F1_MASK ? "disabled" : "enabled");
0066 
0067     return data & REGSPEC_RESET_F1_MASK ? 0 : 1;
0068 }
0069 
/*
 * Compute the PLL output rate from the hardware divider fields.
 *   v1 PCP: Fout = Fref * (N_DIV + 4) / 2
 *   v1 SOC: Fout = (Fref / NREF) * NFB / NOUT
 *   v2:     Fout = Fref * FBDIVC / NOUT, NOUT = 2 or 3 per SC_OUTDIV2
 */
static unsigned long xgene_clk_pll_recalc_rate(struct clk_hw *hw,
                unsigned long parent_rate)
{
    struct xgene_clk_pll *pllclk = to_xgene_clk_pll(hw);
    unsigned long fref; /* reference clock after the input divider */
    unsigned long fvco; /* VCO frequency before the output divider */
    u32 pll;
    u32 nref;   /* reference (input) divider */
    u32 nout;   /* output divider */
    u32 nfb;    /* feedback divider */

    pll = xgene_clk_read(pllclk->reg + pllclk->pll_offset);

    if (pllclk->version <= 1) {
        if (pllclk->type == PLL_TYPE_PCP) {
            /*
            * PLL VCO = Reference clock * NF
            * PCP PLL = PLL_VCO / 2
            */
            nout = 2;
            fvco = parent_rate * (N_DIV_RD(pll) + 4);
        } else {
            /*
            * Fref = Reference Clock / NREF;
            * Fvco = Fref * NFB;
            * Fout = Fvco / NOUT;
            */
            nref = CLKR_RD(pll) + 1;
            nout = CLKOD_RD(pll) + 1;
            nfb = CLKF_RD(pll);
            fref = parent_rate / nref;
            fvco = fref * nfb;
        }
    } else {
        /*
         * fvco = Reference clock * FBDIVC
         * PLL freq = fvco / NOUT
         */
        nout = SC_OUTDIV2(pll) ? 2 : 3;
        fvco = parent_rate * SC_N_DIV_RD(pll);
    }
    pr_debug("%s pll recalc rate %ld parent %ld version %d\n",
         clk_hw_get_name(hw), fvco / nout, parent_rate,
         pllclk->version);

    return fvco / nout;
}
0117 
/* PLL clocks expose only status/rate queries; no enable/set_rate ops */
static const struct clk_ops xgene_clk_pll_ops = {
    .is_enabled = xgene_clk_pll_is_enabled,
    .recalc_rate = xgene_clk_pll_recalc_rate,
};
0122 
0123 static struct clk *xgene_register_clk_pll(struct device *dev,
0124     const char *name, const char *parent_name,
0125     unsigned long flags, void __iomem *reg, u32 pll_offset,
0126     u32 type, spinlock_t *lock, int version)
0127 {
0128     struct xgene_clk_pll *apmclk;
0129     struct clk *clk;
0130     struct clk_init_data init;
0131 
0132     /* allocate the APM clock structure */
0133     apmclk = kzalloc(sizeof(*apmclk), GFP_KERNEL);
0134     if (!apmclk)
0135         return ERR_PTR(-ENOMEM);
0136 
0137     init.name = name;
0138     init.ops = &xgene_clk_pll_ops;
0139     init.flags = flags;
0140     init.parent_names = parent_name ? &parent_name : NULL;
0141     init.num_parents = parent_name ? 1 : 0;
0142 
0143     apmclk->version = version;
0144     apmclk->reg = reg;
0145     apmclk->lock = lock;
0146     apmclk->pll_offset = pll_offset;
0147     apmclk->type = type;
0148     apmclk->hw.init = &init;
0149 
0150     /* Register the clock */
0151     clk = clk_register(dev, &apmclk->hw);
0152     if (IS_ERR(clk)) {
0153         pr_err("%s: could not register clk %s\n", __func__, name);
0154         kfree(apmclk);
0155         return NULL;
0156     }
0157     return clk;
0158 }
0159 
0160 static int xgene_pllclk_version(struct device_node *np)
0161 {
0162     if (of_device_is_compatible(np, "apm,xgene-socpll-clock"))
0163         return 1;
0164     if (of_device_is_compatible(np, "apm,xgene-pcppll-clock"))
0165         return 1;
0166     return 2;
0167 }
0168 
0169 static void xgene_pllclk_init(struct device_node *np, enum xgene_pll_type pll_type)
0170 {
0171     const char *clk_name = np->full_name;
0172     struct clk *clk;
0173     void __iomem *reg;
0174     int version = xgene_pllclk_version(np);
0175 
0176     reg = of_iomap(np, 0);
0177     if (!reg) {
0178         pr_err("Unable to map CSR register for %pOF\n", np);
0179         return;
0180     }
0181     of_property_read_string(np, "clock-output-names", &clk_name);
0182     clk = xgene_register_clk_pll(NULL,
0183             clk_name, of_clk_get_parent_name(np, 0),
0184             0, reg, 0, pll_type, &clk_lock,
0185             version);
0186     if (!IS_ERR(clk)) {
0187         of_clk_add_provider(np, of_clk_src_simple_get, clk);
0188         clk_register_clkdev(clk, clk_name, NULL);
0189         pr_debug("Add %s clock PLL\n", clk_name);
0190     }
0191 }
0192 
/* OF init hook for "apm,xgene-socpll[-v2]-clock" nodes */
static void xgene_socpllclk_init(struct device_node *np)
{
    xgene_pllclk_init(np, PLL_TYPE_SOC);
}
0197 
/* OF init hook for "apm,xgene-pcppll[-v2]-clock" nodes */
static void xgene_pcppllclk_init(struct device_node *np)
{
    xgene_pllclk_init(np, PLL_TYPE_PCP);
}
0202 
/**
 * struct xgene_clk_pmd - PMD clock
 *
 * @hw:     handle between common and hardware-specific interfaces
 * @reg:    register containing the fractional scale multiplier (scaler)
 * @shift:  shift to the unit bit field
 * @mask:   mask to the unit bit field
 * @denom:  1/denominator unit
 * @flags: XGENE_CLK_PMD_SCALE_INVERTED - By default the scaler is the value read
 *  from the register plus one. For example,
 *      0 for (0 + 1) / denom,
 *      1 for (1 + 1) / denom and etc.
 *  If this flag is set, it is
 *      0 for (denom - 0) / denom,
 *      1 for (denom - 1) / denom and etc.
 * @lock:   register lock, may be NULL
 */
struct xgene_clk_pmd {
    struct clk_hw   hw;
    void __iomem    *reg;
    u8      shift;
    u32     mask;
    u64     denom;
    u32     flags;
    spinlock_t  *lock;
};

/* Convert a clk_hw handle back to its containing xgene_clk_pmd */
#define to_xgene_clk_pmd(_hw) container_of(_hw, struct xgene_clk_pmd, hw)

#define XGENE_CLK_PMD_SCALE_INVERTED    BIT(0)  /* scaler counts down from denom */
#define XGENE_CLK_PMD_SHIFT     8   /* scaler field position in the CSR */
#define XGENE_CLK_PMD_WIDTH     3   /* scaler field width in bits */
0235 
/*
 * Recalculate the PMD clock rate: (parent_rate / denom) * scaler, where
 * the scaler is read from hardware and decoded per the
 * XGENE_CLK_PMD_SCALE_INVERTED flag (see struct xgene_clk_pmd).
 */
static unsigned long xgene_clk_pmd_recalc_rate(struct clk_hw *hw,
                           unsigned long parent_rate)
{
    struct xgene_clk_pmd *fd = to_xgene_clk_pmd(hw);
    unsigned long flags = 0;
    u64 ret, scale;
    u32 val;

    /* Lock if one was supplied; __acquire/__release keep sparse's
     * lock-context tracking balanced in the lockless case. */
    if (fd->lock)
        spin_lock_irqsave(fd->lock, flags);
    else
        __acquire(fd->lock);

    val = readl(fd->reg);

    if (fd->lock)
        spin_unlock_irqrestore(fd->lock, flags);
    else
        __release(fd->lock);

    ret = (u64)parent_rate;

    scale = (val & fd->mask) >> fd->shift;
    if (fd->flags & XGENE_CLK_PMD_SCALE_INVERTED)
        scale = fd->denom - scale;  /* scaler counts down from denom */
    else
        scale++;    /* register stores scaler - 1 */

    /* freq = parent_rate * scaler / denom */
    do_div(ret, fd->denom);
    ret *= scale;
    /* Never report 0 Hz; fall back to the parent rate */
    if (ret == 0)
        ret = (u64)parent_rate;

    return ret;
}
0272 
/*
 * Round @rate to the nearest rate the scaler field can produce:
 * scaler = ceil(rate * denom / parent_rate), result = parent * scaler / denom.
 */
static long xgene_clk_pmd_round_rate(struct clk_hw *hw, unsigned long rate,
                     unsigned long *parent_rate)
{
    struct xgene_clk_pmd *fd = to_xgene_clk_pmd(hw);
    u64 ret, scale;

    /* Zero or above-parent requests are clamped to the parent rate */
    if (!rate || rate >= *parent_rate)
        return *parent_rate;

    /* freq = parent_rate * scaler / denom */
    ret = rate * fd->denom;
    scale = DIV_ROUND_UP_ULL(ret, *parent_rate);

    /* Convert the rounded-up scaler back into an achievable rate */
    ret = (u64)*parent_rate * scale;
    do_div(ret, fd->denom);

    return ret;
}
0291 
/*
 * Program the scaler field so that the output approximates @rate.
 * Returns 0 on success (the register write itself cannot fail).
 */
static int xgene_clk_pmd_set_rate(struct clk_hw *hw, unsigned long rate,
                  unsigned long parent_rate)
{
    struct xgene_clk_pmd *fd = to_xgene_clk_pmd(hw);
    unsigned long flags = 0;
    u64 scale, ret;
    u32 val;

    /*
     * Compute the scaler:
     *
     * freq = parent_rate * scaler / denom, or
     * scaler = freq * denom / parent_rate
     */
    ret = rate * fd->denom;
    scale = DIV_ROUND_UP_ULL(ret, (u64)parent_rate);

    /* Check if inverted */
    if (fd->flags & XGENE_CLK_PMD_SCALE_INVERTED)
        scale = fd->denom - scale;
    else
        scale--;    /* register stores scaler - 1 */

    /* Lock (or annotate for sparse) around the read-modify-write */
    if (fd->lock)
        spin_lock_irqsave(fd->lock, flags);
    else
        __acquire(fd->lock);

    /* NOTE(review): scale is not clamped to fd->mask before the shift;
     * assumed kept in range by round_rate() - TODO confirm */
    val = readl(fd->reg);
    val &= ~fd->mask;
    val |= (scale << fd->shift);
    writel(val, fd->reg);

    if (fd->lock)
        spin_unlock_irqrestore(fd->lock, flags);
    else
        __release(fd->lock);

    return 0;
}
0332 
/* PMD (fractional scaler) clock operations */
static const struct clk_ops xgene_clk_pmd_ops = {
    .recalc_rate = xgene_clk_pmd_recalc_rate,
    .round_rate = xgene_clk_pmd_round_rate,
    .set_rate = xgene_clk_pmd_set_rate,
};
0338 
0339 static struct clk *
0340 xgene_register_clk_pmd(struct device *dev,
0341                const char *name, const char *parent_name,
0342                unsigned long flags, void __iomem *reg, u8 shift,
0343                u8 width, u64 denom, u32 clk_flags, spinlock_t *lock)
0344 {
0345     struct xgene_clk_pmd *fd;
0346     struct clk_init_data init;
0347     struct clk *clk;
0348 
0349     fd = kzalloc(sizeof(*fd), GFP_KERNEL);
0350     if (!fd)
0351         return ERR_PTR(-ENOMEM);
0352 
0353     init.name = name;
0354     init.ops = &xgene_clk_pmd_ops;
0355     init.flags = flags;
0356     init.parent_names = parent_name ? &parent_name : NULL;
0357     init.num_parents = parent_name ? 1 : 0;
0358 
0359     fd->reg = reg;
0360     fd->shift = shift;
0361     fd->mask = (BIT(width) - 1) << shift;
0362     fd->denom = denom;
0363     fd->flags = clk_flags;
0364     fd->lock = lock;
0365     fd->hw.init = &init;
0366 
0367     clk = clk_register(dev, &fd->hw);
0368     if (IS_ERR(clk)) {
0369         pr_err("%s: could not register clk %s\n", __func__, name);
0370         kfree(fd);
0371         return NULL;
0372     }
0373 
0374     return clk;
0375 }
0376 
0377 static void xgene_pmdclk_init(struct device_node *np)
0378 {
0379     const char *clk_name = np->full_name;
0380     void __iomem *csr_reg;
0381     struct resource res;
0382     struct clk *clk;
0383     u64 denom;
0384     u32 flags = 0;
0385     int rc;
0386 
0387     /* Check if the entry is disabled */
0388     if (!of_device_is_available(np))
0389         return;
0390 
0391     /* Parse the DTS register for resource */
0392     rc = of_address_to_resource(np, 0, &res);
0393     if (rc != 0) {
0394         pr_err("no DTS register for %pOF\n", np);
0395         return;
0396     }
0397     csr_reg = of_iomap(np, 0);
0398     if (!csr_reg) {
0399         pr_err("Unable to map resource for %pOF\n", np);
0400         return;
0401     }
0402     of_property_read_string(np, "clock-output-names", &clk_name);
0403 
0404     denom = BIT(XGENE_CLK_PMD_WIDTH);
0405     flags |= XGENE_CLK_PMD_SCALE_INVERTED;
0406 
0407     clk = xgene_register_clk_pmd(NULL, clk_name,
0408                      of_clk_get_parent_name(np, 0), 0,
0409                      csr_reg, XGENE_CLK_PMD_SHIFT,
0410                      XGENE_CLK_PMD_WIDTH, denom,
0411                      flags, &clk_lock);
0412     if (!IS_ERR(clk)) {
0413         of_clk_add_provider(np, of_clk_src_simple_get, clk);
0414         clk_register_clkdev(clk, clk_name, NULL);
0415         pr_debug("Add %s clock\n", clk_name);
0416     } else {
0417         if (csr_reg)
0418             iounmap(csr_reg);
0419     }
0420 }
0421 
/* IP Clock */

/*
 * Register description for one device (IP) clock: an optional
 * clock-enable / CSR-reset register pair and an optional divider
 * register.  Either base pointer may be NULL.
 */
struct xgene_dev_parameters {
    void __iomem *csr_reg;      /* CSR for IP clock */
    u32 reg_clk_offset;     /* Offset to clock enable CSR */
    u32 reg_clk_mask;       /* Mask bit for clock enable */
    u32 reg_csr_offset;     /* Offset to CSR reset */
    u32 reg_csr_mask;       /* Mask bit for disable CSR reset */
    void __iomem *divider_reg;  /* CSR for divider */
    u32 reg_divider_offset;     /* Offset to divider register */
    u32 reg_divider_shift;      /* Bit shift to divider field */
    u32 reg_divider_width;      /* Width of the bit to divider field */
};
0434 
struct xgene_clk {
    struct clk_hw   hw;     /* common clock framework handle */
    spinlock_t  *lock;      /* serializes CSR read-modify-write, may be NULL */
    struct xgene_dev_parameters param;  /* register description, copied at registration */
};

/* Convert a clk_hw handle back to its containing xgene_clk */
#define to_xgene_clk(_hw) container_of(_hw, struct xgene_clk, hw)
0442 
/*
 * Ungate the device clock: set the clock-enable bits, then clear the
 * CSR reset bits.  With no csr_reg configured this is a no-op (such
 * clocks are treated as always enabled; see xgene_clk_is_enabled()).
 */
static int xgene_clk_enable(struct clk_hw *hw)
{
    struct xgene_clk *pclk = to_xgene_clk(hw);
    unsigned long flags = 0;
    u32 data;

    if (pclk->lock)
        spin_lock_irqsave(pclk->lock, flags);

    if (pclk->param.csr_reg) {
        pr_debug("%s clock enabled\n", clk_hw_get_name(hw));
        /* First enable the clock */
        data = xgene_clk_read(pclk->param.csr_reg +
                    pclk->param.reg_clk_offset);
        data |= pclk->param.reg_clk_mask;
        xgene_clk_write(data, pclk->param.csr_reg +
                    pclk->param.reg_clk_offset);
        pr_debug("%s clk offset 0x%08X mask 0x%08X value 0x%08X\n",
            clk_hw_get_name(hw),
            pclk->param.reg_clk_offset, pclk->param.reg_clk_mask,
            data);

        /* Second enable the CSR (clear the reset mask bits) */
        data = xgene_clk_read(pclk->param.csr_reg +
                    pclk->param.reg_csr_offset);
        data &= ~pclk->param.reg_csr_mask;
        xgene_clk_write(data, pclk->param.csr_reg +
                    pclk->param.reg_csr_offset);
        pr_debug("%s csr offset 0x%08X mask 0x%08X value 0x%08X\n",
            clk_hw_get_name(hw),
            pclk->param.reg_csr_offset, pclk->param.reg_csr_mask,
            data);
    }

    if (pclk->lock)
        spin_unlock_irqrestore(pclk->lock, flags);

    return 0;
}
0482 
/*
 * Gate the device clock: reverse of xgene_clk_enable() - assert the
 * CSR reset bits first, then clear the clock-enable bits.  A NULL
 * csr_reg makes this a no-op.
 */
static void xgene_clk_disable(struct clk_hw *hw)
{
    struct xgene_clk *pclk = to_xgene_clk(hw);
    unsigned long flags = 0;
    u32 data;

    if (pclk->lock)
        spin_lock_irqsave(pclk->lock, flags);

    if (pclk->param.csr_reg) {
        pr_debug("%s clock disabled\n", clk_hw_get_name(hw));
        /* First put the CSR in reset */
        data = xgene_clk_read(pclk->param.csr_reg +
                    pclk->param.reg_csr_offset);
        data |= pclk->param.reg_csr_mask;
        xgene_clk_write(data, pclk->param.csr_reg +
                    pclk->param.reg_csr_offset);

        /* Second disable the clock */
        data = xgene_clk_read(pclk->param.csr_reg +
                    pclk->param.reg_clk_offset);
        data &= ~pclk->param.reg_clk_mask;
        xgene_clk_write(data, pclk->param.csr_reg +
                    pclk->param.reg_clk_offset);
    }

    if (pclk->lock)
        spin_unlock_irqrestore(pclk->lock, flags);
}
0512 
0513 static int xgene_clk_is_enabled(struct clk_hw *hw)
0514 {
0515     struct xgene_clk *pclk = to_xgene_clk(hw);
0516     u32 data = 0;
0517 
0518     if (pclk->param.csr_reg) {
0519         pr_debug("%s clock checking\n", clk_hw_get_name(hw));
0520         data = xgene_clk_read(pclk->param.csr_reg +
0521                     pclk->param.reg_clk_offset);
0522         pr_debug("%s clock is %s\n", clk_hw_get_name(hw),
0523             data & pclk->param.reg_clk_mask ? "enabled" :
0524                             "disabled");
0525     }
0526 
0527     if (!pclk->param.csr_reg)
0528         return 1;
0529     return data & pclk->param.reg_clk_mask ? 1 : 0;
0530 }
0531 
0532 static unsigned long xgene_clk_recalc_rate(struct clk_hw *hw,
0533                 unsigned long parent_rate)
0534 {
0535     struct xgene_clk *pclk = to_xgene_clk(hw);
0536     u32 data;
0537 
0538     if (pclk->param.divider_reg) {
0539         data = xgene_clk_read(pclk->param.divider_reg +
0540                     pclk->param.reg_divider_offset);
0541         data >>= pclk->param.reg_divider_shift;
0542         data &= (1 << pclk->param.reg_divider_width) - 1;
0543 
0544         pr_debug("%s clock recalc rate %ld parent %ld\n",
0545             clk_hw_get_name(hw),
0546             parent_rate / data, parent_rate);
0547 
0548         return parent_rate / data;
0549     } else {
0550         pr_debug("%s clock recalc rate %ld parent %ld\n",
0551             clk_hw_get_name(hw), parent_rate, parent_rate);
0552         return parent_rate;
0553     }
0554 }
0555 
0556 static int xgene_clk_set_rate(struct clk_hw *hw, unsigned long rate,
0557                 unsigned long parent_rate)
0558 {
0559     struct xgene_clk *pclk = to_xgene_clk(hw);
0560     unsigned long flags = 0;
0561     u32 data;
0562     u32 divider;
0563     u32 divider_save;
0564 
0565     if (pclk->lock)
0566         spin_lock_irqsave(pclk->lock, flags);
0567 
0568     if (pclk->param.divider_reg) {
0569         /* Let's compute the divider */
0570         if (rate > parent_rate)
0571             rate = parent_rate;
0572         divider_save = divider = parent_rate / rate; /* Rounded down */
0573         divider &= (1 << pclk->param.reg_divider_width) - 1;
0574         divider <<= pclk->param.reg_divider_shift;
0575 
0576         /* Set new divider */
0577         data = xgene_clk_read(pclk->param.divider_reg +
0578                 pclk->param.reg_divider_offset);
0579         data &= ~(((1 << pclk->param.reg_divider_width) - 1)
0580                 << pclk->param.reg_divider_shift);
0581         data |= divider;
0582         xgene_clk_write(data, pclk->param.divider_reg +
0583                     pclk->param.reg_divider_offset);
0584         pr_debug("%s clock set rate %ld\n", clk_hw_get_name(hw),
0585             parent_rate / divider_save);
0586     } else {
0587         divider_save = 1;
0588     }
0589 
0590     if (pclk->lock)
0591         spin_unlock_irqrestore(pclk->lock, flags);
0592 
0593     return parent_rate / divider_save;
0594 }
0595 
0596 static long xgene_clk_round_rate(struct clk_hw *hw, unsigned long rate,
0597                 unsigned long *prate)
0598 {
0599     struct xgene_clk *pclk = to_xgene_clk(hw);
0600     unsigned long parent_rate = *prate;
0601     u32 divider;
0602 
0603     if (pclk->param.divider_reg) {
0604         /* Let's compute the divider */
0605         if (rate > parent_rate)
0606             rate = parent_rate;
0607         divider = parent_rate / rate;   /* Rounded down */
0608     } else {
0609         divider = 1;
0610     }
0611 
0612     return parent_rate / divider;
0613 }
0614 
/* Device (IP) clock operations: gating, status, and divider control */
static const struct clk_ops xgene_clk_ops = {
    .enable = xgene_clk_enable,
    .disable = xgene_clk_disable,
    .is_enabled = xgene_clk_is_enabled,
    .recalc_rate = xgene_clk_recalc_rate,
    .set_rate = xgene_clk_set_rate,
    .round_rate = xgene_clk_round_rate,
};
0623 
/**
 * xgene_register_clk - register an X-Gene device (IP) clock
 * @dev: associated device, may be NULL
 * @name: clock name
 * @parent_name: name of the parent clock, may be NULL
 * @parameters: register description, copied into the clock structure
 * @lock: register spinlock
 *
 * Returns the registered clock, or an ERR_PTR() on failure.  A failed
 * clkdev lookup registration is logged but not treated as fatal.
 */
static struct clk *xgene_register_clk(struct device *dev,
        const char *name, const char *parent_name,
        struct xgene_dev_parameters *parameters, spinlock_t *lock)
{
    struct xgene_clk *apmclk;
    struct clk *clk;
    struct clk_init_data init;
    int rc;

    /* allocate the APM clock structure */
    apmclk = kzalloc(sizeof(*apmclk), GFP_KERNEL);
    if (!apmclk)
        return ERR_PTR(-ENOMEM);

    init.name = name;
    init.ops = &xgene_clk_ops;
    init.flags = 0;
    init.parent_names = parent_name ? &parent_name : NULL;
    init.num_parents = parent_name ? 1 : 0;

    apmclk->lock = lock;
    apmclk->hw.init = &init;
    apmclk->param = *parameters;

    /* Register the clock */
    clk = clk_register(dev, &apmclk->hw);
    if (IS_ERR(clk)) {
        pr_err("%s: could not register clk %s\n", __func__, name);
        kfree(apmclk);
        return clk;
    }

    /* Register the clock for lookup */
    rc = clk_register_clkdev(clk, name, NULL);
    if (rc != 0) {
        pr_err("%s: could not register lookup clk %s\n",
            __func__, name);
    }
    return clk;
}
0664 
0665 static void __init xgene_devclk_init(struct device_node *np)
0666 {
0667     const char *clk_name = np->full_name;
0668     struct clk *clk;
0669     struct resource res;
0670     int rc;
0671     struct xgene_dev_parameters parameters;
0672     int i;
0673 
0674     /* Check if the entry is disabled */
0675         if (!of_device_is_available(np))
0676                 return;
0677 
0678     /* Parse the DTS register for resource */
0679     parameters.csr_reg = NULL;
0680     parameters.divider_reg = NULL;
0681     for (i = 0; i < 2; i++) {
0682         void __iomem *map_res;
0683         rc = of_address_to_resource(np, i, &res);
0684         if (rc != 0) {
0685             if (i == 0) {
0686                 pr_err("no DTS register for %pOF\n", np);
0687                 return;
0688             }
0689             break;
0690         }
0691         map_res = of_iomap(np, i);
0692         if (!map_res) {
0693             pr_err("Unable to map resource %d for %pOF\n", i, np);
0694             goto err;
0695         }
0696         if (strcmp(res.name, "div-reg") == 0)
0697             parameters.divider_reg = map_res;
0698         else /* if (strcmp(res->name, "csr-reg") == 0) */
0699             parameters.csr_reg = map_res;
0700     }
0701     if (of_property_read_u32(np, "csr-offset", &parameters.reg_csr_offset))
0702         parameters.reg_csr_offset = 0;
0703     if (of_property_read_u32(np, "csr-mask", &parameters.reg_csr_mask))
0704         parameters.reg_csr_mask = 0xF;
0705     if (of_property_read_u32(np, "enable-offset",
0706                 &parameters.reg_clk_offset))
0707         parameters.reg_clk_offset = 0x8;
0708     if (of_property_read_u32(np, "enable-mask", &parameters.reg_clk_mask))
0709         parameters.reg_clk_mask = 0xF;
0710     if (of_property_read_u32(np, "divider-offset",
0711                 &parameters.reg_divider_offset))
0712         parameters.reg_divider_offset = 0;
0713     if (of_property_read_u32(np, "divider-width",
0714                 &parameters.reg_divider_width))
0715         parameters.reg_divider_width = 0;
0716     if (of_property_read_u32(np, "divider-shift",
0717                 &parameters.reg_divider_shift))
0718         parameters.reg_divider_shift = 0;
0719     of_property_read_string(np, "clock-output-names", &clk_name);
0720 
0721     clk = xgene_register_clk(NULL, clk_name,
0722         of_clk_get_parent_name(np, 0), &parameters, &clk_lock);
0723     if (IS_ERR(clk))
0724         goto err;
0725     pr_debug("Add %s clock\n", clk_name);
0726     rc = of_clk_add_provider(np, of_clk_src_simple_get, clk);
0727     if (rc != 0)
0728         pr_err("%s: could register provider clk %pOF\n", __func__, np);
0729 
0730     return;
0731 
0732 err:
0733     if (parameters.csr_reg)
0734         iounmap(parameters.csr_reg);
0735     if (parameters.divider_reg)
0736         iounmap(parameters.divider_reg);
0737 }
0738 
/*
 * OF match table: v1 and v2 PLL compatibles share the same init hooks;
 * the layout difference is handled via xgene_pllclk_version().
 */
CLK_OF_DECLARE(xgene_socpll_clock, "apm,xgene-socpll-clock", xgene_socpllclk_init);
CLK_OF_DECLARE(xgene_pcppll_clock, "apm,xgene-pcppll-clock", xgene_pcppllclk_init);
CLK_OF_DECLARE(xgene_pmd_clock, "apm,xgene-pmd-clock", xgene_pmdclk_init);
CLK_OF_DECLARE(xgene_socpll_v2_clock, "apm,xgene-socpll-v2-clock",
           xgene_socpllclk_init);
CLK_OF_DECLARE(xgene_pcppll_v2_clock, "apm,xgene-pcppll-v2-clock",
           xgene_pcppllclk_init);
CLK_OF_DECLARE(xgene_dev_clock, "apm,xgene-device-clock", xgene_devclk_init);