#include <linux/clk-provider.h>
#include <linux/io.h>

#include "ccu_gate.h"
#include "ccu_mp.h"

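/*
 * Find the best M (linear divider) and P (power-of-two divider)
 * combination yielding the closest rate that does not exceed the
 * requested rate, for a fixed parent rate.
 */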
static void ccu_mp_find_best(unsigned long parent, unsigned long rate,
			     unsigned int max_m, unsigned int max_p,
			     unsigned int *m, unsigned int *p)
{
	unsigned long best_rate = 0;
	unsigned int best_m = 0, best_p = 0;
	unsigned int _m, _p;

	for (_p = 1; _p <= max_p; _p <<= 1) {
		for (_m = 1; _m <= max_m; _m++) {
			unsigned long tmp_rate = parent / _p / _m;

			if (tmp_rate > rate)
				continue;

			if ((rate - tmp_rate) < (rate - best_rate)) {
				best_rate = tmp_rate;
				best_m = _m;
				best_p = _p;
			}
		}
	}

	*m = best_m;
	*p = best_p;
}

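/*
 * Like ccu_mp_find_best(), but the parent rate may be adjusted as well:
 * for each candidate divider, ask the framework what parent rate it can
 * deliver via clk_hw_round_rate() and keep the combination that gets
 * closest to, without exceeding, the requested rate.
 */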
static unsigned long ccu_mp_find_best_with_parent_adj(struct clk_hw *hw,
						      unsigned long *parent,
						      unsigned long rate,
						      unsigned int max_m,
						      unsigned int max_p)
{
	unsigned long parent_rate_saved;
	unsigned long parent_rate, now;
	unsigned long best_rate = 0;
	unsigned int _m, _p, div;
	unsigned long maxdiv;

	parent_rate_saved = *parent;

	/*
	 * Cap the divider product so that rate * div below cannot
	 * overflow an unsigned long.
	 */
	maxdiv = max_m * max_p;
	maxdiv = min(ULONG_MAX / rate, maxdiv);

	for (_p = 1; _p <= max_p; _p <<= 1) {
		for (_m = 1; _m <= max_m; _m++) {
			div = _m * _p;

			if (div > maxdiv)
				break;

			if (rate * div == parent_rate_saved) {
				/*
				 * The requested rate divides the saved
				 * parent rate exactly, so keep the parent
				 * rate and return immediately.
				 */
				*parent = parent_rate_saved;
				return rate;
			}

			parent_rate = clk_hw_round_rate(hw, rate * div);
			now = parent_rate / div;

			if (now <= rate && now > best_rate) {
				best_rate = now;
				*parent = parent_rate;

				if (now == rate)
					return rate;
			}
		}
	}

	return best_rate;
}

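/*
 * Mux helper round_rate callback: scale the requested rate by the fixed
 * post-divider if there is one, then pick the best M/P combination,
 * adjusting the parent rate only when the clock is allowed to change it.
 */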
static unsigned long ccu_mp_round_rate(struct ccu_mux_internal *mux,
				       struct clk_hw *hw,
				       unsigned long *parent_rate,
				       unsigned long rate,
				       void *data)
{
	struct ccu_mp *cmp = data;
	unsigned int max_m, max_p;
	unsigned int m, p;

	/* Scale the requested rate up by the fixed post-divider, if any */
	if (cmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
		rate *= cmp->fixed_post_div;

	/* P is stored as log2, so the max divider is 2^(2^width - 1) */
	max_m = cmp->m.max ?: 1 << cmp->m.width;
	max_p = cmp->p.max ?: 1 << ((1 << cmp->p.width) - 1);

	if (!clk_hw_can_set_rate_parent(&cmp->common.hw)) {
		ccu_mp_find_best(*parent_rate, rate, max_m, max_p, &m, &p);
		rate = *parent_rate / p / m;
	} else {
		rate = ccu_mp_find_best_with_parent_adj(hw, parent_rate, rate,
							max_m, max_p);
	}

	if (cmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
		rate /= cmp->fixed_post_div;

	return rate;
}

static void ccu_mp_disable(struct clk_hw *hw)
{
	struct ccu_mp *cmp = hw_to_ccu_mp(hw);

	return ccu_gate_helper_disable(&cmp->common, cmp->enable);
}

static int ccu_mp_enable(struct clk_hw *hw)
{
	struct ccu_mp *cmp = hw_to_ccu_mp(hw);

	return ccu_gate_helper_enable(&cmp->common, cmp->enable);
}

static int ccu_mp_is_enabled(struct clk_hw *hw)
{
	struct ccu_mp *cmp = hw_to_ccu_mp(hw);

	return ccu_gate_helper_is_enabled(&cmp->common, cmp->enable);
}

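/*
 * Read the current M and P register fields back and compute the
 * resulting rate, taking pre-dividers and any fixed post-divider
 * into account.
 */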
static unsigned long ccu_mp_recalc_rate(struct clk_hw *hw,
					unsigned long parent_rate)
{
	struct ccu_mp *cmp = hw_to_ccu_mp(hw);
	unsigned long rate;
	unsigned int m, p;
	u32 reg;

	/* Adjust parent_rate according to pre-dividers */
	parent_rate = ccu_mux_helper_apply_prediv(&cmp->common, &cmp->mux, -1,
						  parent_rate);

	reg = readl(cmp->common.base + cmp->common.reg);

	m = reg >> cmp->m.shift;
	m &= (1 << cmp->m.width) - 1;
	m += cmp->m.offset;
	if (!m)
		m++;

	p = reg >> cmp->p.shift;
	p &= (1 << cmp->p.width) - 1;

	/* P is a power-of-two divider, stored as a shift amount */
	rate = (parent_rate >> p) / m;
	if (cmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
		rate /= cmp->fixed_post_div;

	return rate;
}

static int ccu_mp_determine_rate(struct clk_hw *hw,
				 struct clk_rate_request *req)
{
	struct ccu_mp *cmp = hw_to_ccu_mp(hw);

	return ccu_mux_helper_determine_rate(&cmp->common, &cmp->mux,
					     req, ccu_mp_round_rate, cmp);
}

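/*
 * Find the best M/P combination for the requested rate and program the
 * M and P register fields under the CCU spinlock.
 */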
static int ccu_mp_set_rate(struct clk_hw *hw, unsigned long rate,
			   unsigned long parent_rate)
{
	struct ccu_mp *cmp = hw_to_ccu_mp(hw);
	unsigned long flags;
	unsigned int max_m, max_p;
	unsigned int m, p;
	u32 reg;

	/* Adjust parent_rate according to pre-dividers */
	parent_rate = ccu_mux_helper_apply_prediv(&cmp->common, &cmp->mux, -1,
						  parent_rate);

	max_m = cmp->m.max ?: 1 << cmp->m.width;
	max_p = cmp->p.max ?: 1 << ((1 << cmp->p.width) - 1);

	/* Adjust the requested rate according to the fixed post-divider */
	if (cmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
		rate = rate * cmp->fixed_post_div;

	ccu_mp_find_best(parent_rate, rate, max_m, max_p, &m, &p);

	spin_lock_irqsave(cmp->common.lock, flags);

	reg = readl(cmp->common.base + cmp->common.reg);
	reg &= ~GENMASK(cmp->m.width + cmp->m.shift - 1, cmp->m.shift);
	reg &= ~GENMASK(cmp->p.width + cmp->p.shift - 1, cmp->p.shift);
	reg |= (m - cmp->m.offset) << cmp->m.shift;
	/* The P field is programmed as log2 of the power-of-two divider */
	reg |= ilog2(p) << cmp->p.shift;

	writel(reg, cmp->common.base + cmp->common.reg);

	spin_unlock_irqrestore(cmp->common.lock, flags);

	return 0;
}

static u8 ccu_mp_get_parent(struct clk_hw *hw)
{
	struct ccu_mp *cmp = hw_to_ccu_mp(hw);

	return ccu_mux_helper_get_parent(&cmp->common, &cmp->mux);
}

static int ccu_mp_set_parent(struct clk_hw *hw, u8 index)
{
	struct ccu_mp *cmp = hw_to_ccu_mp(hw);

	return ccu_mux_helper_set_parent(&cmp->common, &cmp->mux, index);
}

const struct clk_ops ccu_mp_ops = {
	.disable = ccu_mp_disable,
	.enable = ccu_mp_enable,
	.is_enabled = ccu_mp_is_enabled,

	.get_parent = ccu_mp_get_parent,
	.set_parent = ccu_mp_set_parent,

	.determine_rate = ccu_mp_determine_rate,
	.recalc_rate = ccu_mp_recalc_rate,
	.set_rate = ccu_mp_set_rate,
};
EXPORT_SYMBOL_NS_GPL(ccu_mp_ops, SUNXI_CCU);
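
/*
 * MMC timing mode support
 *
 * When the new timing mode bit (CCU_MMC_NEW_TIMING_MODE) is set in the
 * clock's register, the output rate is half of what the M/P dividers
 * alone would produce. The ccu_mp_mmc_ops below wrap the generic ccu_mp
 * callbacks to account for this: requested rates are doubled before
 * being handed to the generic helpers, and rates reported back to the
 * clock framework are halved.
 */
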
static unsigned long ccu_mp_mmc_recalc_rate(struct clk_hw *hw,
					    unsigned long parent_rate)
{
	unsigned long rate = ccu_mp_recalc_rate(hw, parent_rate);
	struct ccu_common *cm = hw_to_ccu_common(hw);
	u32 val = readl(cm->base + cm->reg);

	if (val & CCU_MMC_NEW_TIMING_MODE)
		return rate / 2;

	return rate;
}

static int ccu_mp_mmc_determine_rate(struct clk_hw *hw,
				     struct clk_rate_request *req)
{
	struct ccu_common *cm = hw_to_ccu_common(hw);
	u32 val = readl(cm->base + cm->reg);
	int ret;

	/* Adjust the requested clock rate for the new timing mode */
	if (val & CCU_MMC_NEW_TIMING_MODE) {
		req->rate *= 2;
		req->min_rate *= 2;
		req->max_rate *= 2;
	}

	ret = ccu_mp_determine_rate(hw, req);

	/* Adjust the requested clock rate back for the caller */
	if (val & CCU_MMC_NEW_TIMING_MODE) {
		req->rate /= 2;
		req->min_rate /= 2;
		req->max_rate /= 2;
	}

	return ret;
}

static int ccu_mp_mmc_set_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long parent_rate)
{
	struct ccu_common *cm = hw_to_ccu_common(hw);
	u32 val = readl(cm->base + cm->reg);

	if (val & CCU_MMC_NEW_TIMING_MODE)
		rate *= 2;

	return ccu_mp_set_rate(hw, rate, parent_rate);
}

const struct clk_ops ccu_mp_mmc_ops = {
	.disable = ccu_mp_disable,
	.enable = ccu_mp_enable,
	.is_enabled = ccu_mp_is_enabled,

	.get_parent = ccu_mp_get_parent,
	.set_parent = ccu_mp_set_parent,

	.determine_rate = ccu_mp_mmc_determine_rate,
	.recalc_rate = ccu_mp_mmc_recalc_rate,
	.set_rate = ccu_mp_mmc_set_rate,
};
EXPORT_SYMBOL_NS_GPL(ccu_mp_mmc_ops, SUNXI_CCU);