// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Intel Corporation.
 * Zhu YiXin <yixin.zhu@intel.com>
 * Rahul Tanwar <rahul.tanwar@intel.com>
 */

0008 #include <linux/clk-provider.h>
0009 #include <linux/delay.h>
0010 #include <linux/device.h>
0011 #include <linux/iopoll.h>
0012 #include <linux/of.h>
0013
0014 #include "clk-cgu.h"
0015
0016 #define to_lgm_clk_pll(_hw) container_of(_hw, struct lgm_clk_pll, hw)
0017 #define PLL_REF_DIV(x) ((x) + 0x08)
0018
0019
0020
0021
0022
0023 static unsigned long
0024 lgm_pll_calc_rate(unsigned long prate, unsigned int mult,
0025 unsigned int div, unsigned int frac, unsigned int frac_div)
0026 {
0027 u64 crate, frate, rate64;
0028
0029 rate64 = prate;
0030 crate = rate64 * mult;
0031 frate = rate64 * frac;
0032 do_div(frate, frac_div);
0033 crate += frate;
0034 do_div(crate, div);
0035
0036 return crate;
0037 }
0038
0039 static unsigned long lgm_pll_recalc_rate(struct clk_hw *hw, unsigned long prate)
0040 {
0041 struct lgm_clk_pll *pll = to_lgm_clk_pll(hw);
0042 unsigned int div, mult, frac;
0043 unsigned long flags;
0044
0045 spin_lock_irqsave(&pll->lock, flags);
0046 mult = lgm_get_clk_val(pll->membase, PLL_REF_DIV(pll->reg), 0, 12);
0047 div = lgm_get_clk_val(pll->membase, PLL_REF_DIV(pll->reg), 18, 6);
0048 frac = lgm_get_clk_val(pll->membase, pll->reg, 2, 24);
0049 spin_unlock_irqrestore(&pll->lock, flags);
0050
0051 if (pll->type == TYPE_LJPLL)
0052 div *= 4;
0053
0054 return lgm_pll_calc_rate(prate, mult, div, frac, BIT(24));
0055 }
0056
0057 static int lgm_pll_is_enabled(struct clk_hw *hw)
0058 {
0059 struct lgm_clk_pll *pll = to_lgm_clk_pll(hw);
0060 unsigned long flags;
0061 unsigned int ret;
0062
0063 spin_lock_irqsave(&pll->lock, flags);
0064 ret = lgm_get_clk_val(pll->membase, pll->reg, 0, 1);
0065 spin_unlock_irqrestore(&pll->lock, flags);
0066
0067 return ret;
0068 }
0069
0070 static int lgm_pll_enable(struct clk_hw *hw)
0071 {
0072 struct lgm_clk_pll *pll = to_lgm_clk_pll(hw);
0073 unsigned long flags;
0074 u32 val;
0075 int ret;
0076
0077 spin_lock_irqsave(&pll->lock, flags);
0078 lgm_set_clk_val(pll->membase, pll->reg, 0, 1, 1);
0079 ret = readl_poll_timeout_atomic(pll->membase + pll->reg,
0080 val, (val & 0x1), 1, 100);
0081 spin_unlock_irqrestore(&pll->lock, flags);
0082
0083 return ret;
0084 }
0085
0086 static void lgm_pll_disable(struct clk_hw *hw)
0087 {
0088 struct lgm_clk_pll *pll = to_lgm_clk_pll(hw);
0089 unsigned long flags;
0090
0091 spin_lock_irqsave(&pll->lock, flags);
0092 lgm_set_clk_val(pll->membase, pll->reg, 0, 1, 0);
0093 spin_unlock_irqrestore(&pll->lock, flags);
0094 }
0095
/*
 * clk_ops shared by all LGM PLLs: rate is read-only (recalc_rate only, no
 * set_rate), and gating toggles bit 0 of the PLL register via the
 * enable/disable/is_enabled callbacks defined above.
 */
static const struct clk_ops lgm_pll_ops = {
	.recalc_rate = lgm_pll_recalc_rate,
	.is_enabled = lgm_pll_is_enabled,
	.enable = lgm_pll_enable,
	.disable = lgm_pll_disable,
};
0102
0103 static struct clk_hw *
0104 lgm_clk_register_pll(struct lgm_clk_provider *ctx,
0105 const struct lgm_pll_clk_data *list)
0106 {
0107 struct clk_init_data init = {};
0108 struct lgm_clk_pll *pll;
0109 struct device *dev = ctx->dev;
0110 struct clk_hw *hw;
0111 int ret;
0112
0113 init.ops = &lgm_pll_ops;
0114 init.name = list->name;
0115 init.flags = list->flags;
0116 init.parent_data = list->parent_data;
0117 init.num_parents = list->num_parents;
0118
0119 pll = devm_kzalloc(dev, sizeof(*pll), GFP_KERNEL);
0120 if (!pll)
0121 return ERR_PTR(-ENOMEM);
0122
0123 pll->membase = ctx->membase;
0124 pll->lock = ctx->lock;
0125 pll->reg = list->reg;
0126 pll->flags = list->flags;
0127 pll->type = list->type;
0128 pll->hw.init = &init;
0129
0130 hw = &pll->hw;
0131 ret = devm_clk_hw_register(dev, hw);
0132 if (ret)
0133 return ERR_PTR(ret);
0134
0135 return hw;
0136 }
0137
0138 int lgm_clk_register_plls(struct lgm_clk_provider *ctx,
0139 const struct lgm_pll_clk_data *list,
0140 unsigned int nr_clk)
0141 {
0142 struct clk_hw *hw;
0143 int i;
0144
0145 for (i = 0; i < nr_clk; i++, list++) {
0146 hw = lgm_clk_register_pll(ctx, list);
0147 if (IS_ERR(hw)) {
0148 dev_err(ctx->dev, "failed to register pll: %s\n",
0149 list->name);
0150 return PTR_ERR(hw);
0151 }
0152 ctx->clk_data.hws[list->id] = hw;
0153 }
0154
0155 return 0;
0156 }