// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 2015 Atmel Corporation,
 *                     Nicolas Ferre <nicolas.ferre@atmel.com>
 *
 * Based on clk-programmable & clk-peripheral drivers by Boris BREZILLON.
 */

#include <linux/bitfield.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/clk/at91_pmc.h>
#include <linux/of.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

#include "pmc.h"

#define GENERATED_MAX_DIV	255

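/**
 * struct clk_generated - AT91 generated clock (GCK)
 * @hw:		clk_hw handle for the common clock framework
 * @regmap:	PMC register map
 * @range:	allowed output rate range
 * @lock:	lock serializing PCR register accesses
 * @mux_table:	optional table mapping parent indices to GCKCSS values
 * @id:		peripheral ID selecting this clock in the PCR register
 * @gckdiv:	programmed divisor; the output rate is parent / (gckdiv + 1)
 * @layout:	SoC-specific PCR register layout
 * @pms:	clock state saved across suspend/resume
 * @parent_id:	GCKCSS value of the currently selected parent
 * @chg_pid:	index of the parent whose rate may be changed, or a
 *		negative value if no parent rate may be changed
 */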
struct clk_generated {
	struct clk_hw hw;
	struct regmap *regmap;
	struct clk_range range;
	spinlock_t *lock;
	u32 *mux_table;
	u32 id;
	u32 gckdiv;
	const struct clk_pcr_layout *layout;
	struct at91_clk_pms pms;
	u8 parent_id;
	int chg_pid;
};

#define to_clk_generated(hw) \
	container_of(hw, struct clk_generated, hw)

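/*
 * The PCR register is shared by all peripherals: select the peripheral by
 * writing its ID first, then update the GCK source, divisor and enable
 * fields with a read-modify-write. Callers rely on gck->lock to keep the
 * two accesses atomic.
 */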
static int clk_generated_set(struct clk_generated *gck, int status)
{
	unsigned long flags;
	unsigned int enable = status ? AT91_PMC_PCR_GCKEN : 0;

	spin_lock_irqsave(gck->lock, flags);
	regmap_write(gck->regmap, gck->layout->offset,
		     (gck->id & gck->layout->pid_mask));
	regmap_update_bits(gck->regmap, gck->layout->offset,
			   AT91_PMC_PCR_GCKDIV_MASK | gck->layout->gckcss_mask |
			   gck->layout->cmd | enable,
			   field_prep(gck->layout->gckcss_mask, gck->parent_id) |
			   gck->layout->cmd |
			   FIELD_PREP(AT91_PMC_PCR_GCKDIV_MASK, gck->gckdiv) |
			   enable);
	spin_unlock_irqrestore(gck->lock, flags);

	return 0;
}

static int clk_generated_enable(struct clk_hw *hw)
{
	struct clk_generated *gck = to_clk_generated(hw);

	pr_debug("GCLK: %s, gckdiv = %d, parent id = %d\n",
		 __func__, gck->gckdiv, gck->parent_id);

	clk_generated_set(gck, 1);

	return 0;
}

static void clk_generated_disable(struct clk_hw *hw)
{
	struct clk_generated *gck = to_clk_generated(hw);
	unsigned long flags;

	spin_lock_irqsave(gck->lock, flags);
	regmap_write(gck->regmap, gck->layout->offset,
		     (gck->id & gck->layout->pid_mask));
	regmap_update_bits(gck->regmap, gck->layout->offset,
			   gck->layout->cmd | AT91_PMC_PCR_GCKEN,
			   gck->layout->cmd);
	spin_unlock_irqrestore(gck->lock, flags);
}

static int clk_generated_is_enabled(struct clk_hw *hw)
{
	struct clk_generated *gck = to_clk_generated(hw);
	unsigned long flags;
	unsigned int status;

	spin_lock_irqsave(gck->lock, flags);
	regmap_write(gck->regmap, gck->layout->offset,
		     (gck->id & gck->layout->pid_mask));
	regmap_read(gck->regmap, gck->layout->offset, &status);
	spin_unlock_irqrestore(gck->lock, flags);

	return !!(status & AT91_PMC_PCR_GCKEN);
}

static unsigned long
clk_generated_recalc_rate(struct clk_hw *hw,
			  unsigned long parent_rate)
{
	struct clk_generated *gck = to_clk_generated(hw);

	return DIV_ROUND_CLOSEST(parent_rate, gck->gckdiv + 1);
}

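/*
 * Helper for clk_generated_determine_rate(): compute the rate obtained from
 * @parent_rate and @div (a @div of 0 means the parent rate is used as-is)
 * and, if it beats the best candidate found so far while staying inside the
 * requested min/max bounds, record it in @req.
 */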
static void clk_generated_best_diff(struct clk_rate_request *req,
				    struct clk_hw *parent,
				    unsigned long parent_rate, u32 div,
				    int *best_diff, long *best_rate)
{
	unsigned long tmp_rate;
	int tmp_diff;

	if (!div)
		tmp_rate = parent_rate;
	else
		tmp_rate = parent_rate / div;

	if (tmp_rate < req->min_rate || tmp_rate > req->max_rate)
		return;

	tmp_diff = abs(req->rate - tmp_rate);

	if (*best_diff < 0 || *best_diff >= tmp_diff) {
		*best_rate = tmp_rate;
		*best_diff = tmp_diff;
		req->best_parent_rate = parent_rate;
		req->best_parent_hw = parent;
	}
}

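/*
 * Rate selection is done in two passes: first scan the fixed-rate parents
 * for the divisor that comes closest to the requested rate, then, if a
 * changeable-rate parent (chg_pid) exists, ask it for rates that are exact
 * multiples of the request and compare against the best match so far.
 */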
static int clk_generated_determine_rate(struct clk_hw *hw,
					struct clk_rate_request *req)
{
	struct clk_generated *gck = to_clk_generated(hw);
	struct clk_hw *parent = NULL;
	struct clk_rate_request req_parent = *req;
	long best_rate = -EINVAL;
	unsigned long min_rate, parent_rate;
	int best_diff = -1;
	int i;
	u32 div;

	/* do not look for a rate that is outside of our range */
	if (gck->range.max && req->rate > gck->range.max)
		req->rate = gck->range.max;
	if (gck->range.min && req->rate < gck->range.min)
		req->rate = gck->range.min;

	for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
		if (gck->chg_pid == i)
			continue;

		parent = clk_hw_get_parent_by_index(hw, i);
		if (!parent)
			continue;

		parent_rate = clk_hw_get_rate(parent);
		min_rate = DIV_ROUND_CLOSEST(parent_rate, GENERATED_MAX_DIV + 1);
		if (!parent_rate ||
		    (gck->range.max && min_rate > gck->range.max))
			continue;

		div = DIV_ROUND_CLOSEST(parent_rate, req->rate);
		if (div > GENERATED_MAX_DIV + 1)
			div = GENERATED_MAX_DIV + 1;

		clk_generated_best_diff(req, parent, parent_rate, div,
					&best_diff, &best_rate);

		if (!best_diff)
			break;
	}

	/*
	 * The audio_pll rate can be modified, unlike the five other
	 * clocks that should never be altered.
	 * The audio_pll can technically be used by multiple consumers;
	 * however, with rate locking, the first consumer to enable the
	 * clock definitively sets its rate.
	 * Since audio IPs are most likely to request the same rate, we
	 * enforce that the only clocks able to modify the gck rate are
	 * those of audio IPs.
	 */
	if (gck->chg_pid < 0)
		goto end;

	/* Try to find the best rate for the audio PLL */
	parent = clk_hw_get_parent_by_index(hw, gck->chg_pid);
	if (!parent)
		goto end;

	for (div = 1; div < GENERATED_MAX_DIV + 2; div++) {
		req_parent.rate = req->rate * div;
		if (__clk_determine_rate(parent, &req_parent))
			continue;
		clk_generated_best_diff(req, parent, req_parent.rate, div,
					&best_diff, &best_rate);

		if (!best_diff)
			break;
	}

end:
	pr_debug("GCLK: %s, best_rate = %ld, parent clk: %s @ %ld\n",
		 __func__, best_rate,
		 __clk_get_name((req->best_parent_hw)->clk),
		 req->best_parent_rate);

	if (best_rate < 0 || (gck->range.max && best_rate > gck->range.max))
		return -EINVAL;

	req->rate = best_rate;
	return 0;
}

/* No modification of hardware as we have the flag CLK_SET_PARENT_GATE set */
static int clk_generated_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_generated *gck = to_clk_generated(hw);

	if (index >= clk_hw_get_num_parents(hw))
		return -EINVAL;

	if (gck->mux_table)
		gck->parent_id = clk_mux_index_to_val(gck->mux_table, 0, index);
	else
		gck->parent_id = index;

	return 0;
}

static u8 clk_generated_get_parent(struct clk_hw *hw)
{
	struct clk_generated *gck = to_clk_generated(hw);

	return gck->parent_id;
}

/* No modification of hardware as we have the flag CLK_SET_RATE_GATE set */
static int clk_generated_set_rate(struct clk_hw *hw,
				  unsigned long rate,
				  unsigned long parent_rate)
{
	struct clk_generated *gck = to_clk_generated(hw);
	u32 div;

	if (!rate)
		return -EINVAL;

	if (gck->range.max && rate > gck->range.max)
		return -EINVAL;

	div = DIV_ROUND_CLOSEST(parent_rate, rate);
	if (div > GENERATED_MAX_DIV + 1 || !div)
		return -EINVAL;

	gck->gckdiv = div - 1;
	return 0;
}

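/*
 * Only the enable status needs to be saved across suspend: the selected
 * parent and divisor are kept in gck->parent_id and gck->gckdiv, and
 * clk_generated_set() reprograms all of them on restore.
 */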
static int clk_generated_save_context(struct clk_hw *hw)
{
	struct clk_generated *gck = to_clk_generated(hw);

	gck->pms.status = clk_generated_is_enabled(&gck->hw);

	return 0;
}

static void clk_generated_restore_context(struct clk_hw *hw)
{
	struct clk_generated *gck = to_clk_generated(hw);

	if (gck->pms.status)
		clk_generated_set(gck, gck->pms.status);
}

static const struct clk_ops generated_ops = {
	.enable = clk_generated_enable,
	.disable = clk_generated_disable,
	.is_enabled = clk_generated_is_enabled,
	.recalc_rate = clk_generated_recalc_rate,
	.determine_rate = clk_generated_determine_rate,
	.get_parent = clk_generated_get_parent,
	.set_parent = clk_generated_set_parent,
	.set_rate = clk_generated_set_rate,
	.save_context = clk_generated_save_context,
	.restore_context = clk_generated_restore_context,
};

/**
 * clk_generated_startup - Initialize a given clock to its default parent and
 * divisor parameter.
 *
 * @gck:	Generated clock to set the startup parameters for.
 *
 * Take parameters from the hardware and update local clock configuration
 * accordingly.
 */
static void clk_generated_startup(struct clk_generated *gck)
{
	u32 tmp;
	unsigned long flags;

	spin_lock_irqsave(gck->lock, flags);
	regmap_write(gck->regmap, gck->layout->offset,
		     (gck->id & gck->layout->pid_mask));
	regmap_read(gck->regmap, gck->layout->offset, &tmp);
	spin_unlock_irqrestore(gck->lock, flags);

	gck->parent_id = field_get(gck->layout->gckcss_mask, tmp);
	gck->gckdiv = FIELD_GET(AT91_PMC_PCR_GCKDIV_MASK, tmp);
}

struct clk_hw * __init
at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock,
			    const struct clk_pcr_layout *layout,
			    const char *name, const char **parent_names,
			    u32 *mux_table, u8 num_parents, u8 id,
			    const struct clk_range *range,
			    int chg_pid)
{
	struct clk_generated *gck;
	struct clk_init_data init;
	struct clk_hw *hw;
	int ret;

	gck = kzalloc(sizeof(*gck), GFP_KERNEL);
	if (!gck)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &generated_ops;
	init.parent_names = parent_names;
	init.num_parents = num_parents;
	init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
	if (chg_pid >= 0)
		init.flags |= CLK_SET_RATE_PARENT;

	gck->id = id;
	gck->hw.init = &init;
	gck->regmap = regmap;
	gck->lock = lock;
	gck->range = *range;
	gck->chg_pid = chg_pid;
	gck->layout = layout;
	gck->mux_table = mux_table;

	clk_generated_startup(gck);
	hw = &gck->hw;
	ret = clk_hw_register(NULL, &gck->hw);
	if (ret) {
		kfree(gck);
		hw = ERR_PTR(ret);
	}

	return hw;
}
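
/*
 * Illustrative usage (a sketch, not part of this file): a SoC PMC setup
 * file registers one generated clock per peripheral, along the lines of:
 *
 *	hw = at91_clk_register_generated(regmap, &pmc_pcr_lock,
 *					 &sama5d2_pcr_layout, "i2s0_gclk",
 *					 parent_names, NULL, 6, 54,
 *					 &range, -1);
 *	if (IS_ERR(hw))
 *		return;
 *
 * The names above (pmc_pcr_lock, sama5d2_pcr_layout, parent_names, range)
 * are assumed to come from the caller, and the peripheral ID, parent count
 * and range are hypothetical values for the sake of the example.
 */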