0001
0002
0003
0004
0005
0006 #include <linux/clk-provider.h>
0007 #include <linux/io.h>
0008 #include <linux/slab.h>
0009 #include "clk.h"
0010
/* Bitmask covering a divider register field of the given bit width. */
#define div_mask(width) ((1 << (width)) - 1)
0012
0013 static bool _is_best_half_div(unsigned long rate, unsigned long now,
0014 unsigned long best, unsigned long flags)
0015 {
0016 if (flags & CLK_DIVIDER_ROUND_CLOSEST)
0017 return abs(rate - now) < abs(rate - best);
0018
0019 return now <= rate && now > best;
0020 }
0021
0022 static unsigned long clk_half_divider_recalc_rate(struct clk_hw *hw,
0023 unsigned long parent_rate)
0024 {
0025 struct clk_divider *divider = to_clk_divider(hw);
0026 unsigned int val;
0027
0028 val = readl(divider->reg) >> divider->shift;
0029 val &= div_mask(divider->width);
0030 val = val * 2 + 3;
0031
0032 return DIV_ROUND_UP_ULL(((u64)parent_rate * 2), val);
0033 }
0034
/*
 * Find the divider field value whose output best approximates @rate.
 *
 * The half divider computes out = parent_rate * 2 / (field * 2 + 3),
 * i.e. an effective divisor of (field + 1.5).
 *
 * Returns the chosen field value (0 .. div_mask(@width)).  When the
 * parent may be re-clocked (CLK_SET_RATE_PARENT), the matching parent
 * rate is written back through @best_parent_rate.
 */
static int clk_half_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
				    unsigned long *best_parent_rate, u8 width,
				    unsigned long flags)
{
	unsigned int i, bestdiv = 0;
	unsigned long parent_rate, best = 0, now, maxdiv;
	unsigned long parent_rate_saved = *best_parent_rate;

	/* Guard the divisions below against a zero request. */
	if (!rate)
		rate = 1;

	maxdiv = div_mask(width);

	if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) {
		/*
		 * Parent rate is fixed: invert the rate formula directly,
		 * field = (parent * 2 / rate - 3) / 2, with the < 3 check
		 * preventing unsigned underflow for rates above the
		 * divider's maximum output; clamp to the register width.
		 */
		parent_rate = *best_parent_rate;
		bestdiv = DIV_ROUND_UP_ULL(((u64)parent_rate * 2), rate);
		if (bestdiv < 3)
			bestdiv = 0;
		else
			bestdiv = (bestdiv - 3) / 2;
		bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv;
		return bestdiv;
	}

	/*
	 * Bound the search so that rate * (i * 2 + 3) below cannot
	 * overflow an unsigned long.
	 */
	maxdiv = min(ULONG_MAX / rate, maxdiv);

	for (i = 0; i <= maxdiv; i++) {
		if (((u64)rate * (i * 2 + 3)) == ((u64)parent_rate_saved * 2)) {
			/*
			 * The requested rate divides exactly from the
			 * caller's original parent rate: take this field
			 * without asking the parent to change, avoiding a
			 * needless parent-rate recalculation.
			 */
			*best_parent_rate = parent_rate_saved;
			return i;
		}
		/* Ask the parent how close it can get to rate * (i + 1.5). */
		parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw),
						((u64)rate * (i * 2 + 3)) / 2);
		now = DIV_ROUND_UP_ULL(((u64)parent_rate * 2),
				       (i * 2 + 3));

		if (_is_best_half_div(rate, now, best, flags)) {
			bestdiv = i;
			best = now;
			*best_parent_rate = parent_rate;
		}
	}

	if (!bestdiv) {
		/* No candidate beat best == 0: fall back to the largest
		 * divider with the slowest parent rate available. */
		bestdiv = div_mask(width);
		*best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), 1);
	}

	return bestdiv;
}
0094
0095 static long clk_half_divider_round_rate(struct clk_hw *hw, unsigned long rate,
0096 unsigned long *prate)
0097 {
0098 struct clk_divider *divider = to_clk_divider(hw);
0099 int div;
0100
0101 div = clk_half_divider_bestdiv(hw, rate, prate,
0102 divider->width,
0103 divider->flags);
0104
0105 return DIV_ROUND_UP_ULL(((u64)*prate * 2), div * 2 + 3);
0106 }
0107
0108 static int clk_half_divider_set_rate(struct clk_hw *hw, unsigned long rate,
0109 unsigned long parent_rate)
0110 {
0111 struct clk_divider *divider = to_clk_divider(hw);
0112 unsigned int value;
0113 unsigned long flags = 0;
0114 u32 val;
0115
0116 value = DIV_ROUND_UP_ULL(((u64)parent_rate * 2), rate);
0117 value = (value - 3) / 2;
0118 value = min_t(unsigned int, value, div_mask(divider->width));
0119
0120 if (divider->lock)
0121 spin_lock_irqsave(divider->lock, flags);
0122 else
0123 __acquire(divider->lock);
0124
0125 if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
0126 val = div_mask(divider->width) << (divider->shift + 16);
0127 } else {
0128 val = readl(divider->reg);
0129 val &= ~(div_mask(divider->width) << divider->shift);
0130 }
0131 val |= value << divider->shift;
0132 writel(val, divider->reg);
0133
0134 if (divider->lock)
0135 spin_unlock_irqrestore(divider->lock, flags);
0136 else
0137 __release(divider->lock);
0138
0139 return 0;
0140 }
0141
/*
 * Clock operations for the half divider, which divides by (field + 1.5);
 * see clk_half_divider_recalc_rate().
 */
static const struct clk_ops clk_half_divider_ops = {
	.recalc_rate = clk_half_divider_recalc_rate,
	.round_rate = clk_half_divider_round_rate,
	.set_rate = clk_half_divider_set_rate,
};
0147
0148
0149
0150
0151
0152
0153
0154
0155
0156
0157
0158 struct clk *rockchip_clk_register_halfdiv(const char *name,
0159 const char *const *parent_names,
0160 u8 num_parents, void __iomem *base,
0161 int muxdiv_offset, u8 mux_shift,
0162 u8 mux_width, u8 mux_flags,
0163 u8 div_shift, u8 div_width,
0164 u8 div_flags, int gate_offset,
0165 u8 gate_shift, u8 gate_flags,
0166 unsigned long flags,
0167 spinlock_t *lock)
0168 {
0169 struct clk_hw *hw = ERR_PTR(-ENOMEM);
0170 struct clk_mux *mux = NULL;
0171 struct clk_gate *gate = NULL;
0172 struct clk_divider *div = NULL;
0173 const struct clk_ops *mux_ops = NULL, *div_ops = NULL,
0174 *gate_ops = NULL;
0175
0176 if (num_parents > 1) {
0177 mux = kzalloc(sizeof(*mux), GFP_KERNEL);
0178 if (!mux)
0179 return ERR_PTR(-ENOMEM);
0180
0181 mux->reg = base + muxdiv_offset;
0182 mux->shift = mux_shift;
0183 mux->mask = BIT(mux_width) - 1;
0184 mux->flags = mux_flags;
0185 mux->lock = lock;
0186 mux_ops = (mux_flags & CLK_MUX_READ_ONLY) ? &clk_mux_ro_ops
0187 : &clk_mux_ops;
0188 }
0189
0190 if (gate_offset >= 0) {
0191 gate = kzalloc(sizeof(*gate), GFP_KERNEL);
0192 if (!gate)
0193 goto err_gate;
0194
0195 gate->flags = gate_flags;
0196 gate->reg = base + gate_offset;
0197 gate->bit_idx = gate_shift;
0198 gate->lock = lock;
0199 gate_ops = &clk_gate_ops;
0200 }
0201
0202 if (div_width > 0) {
0203 div = kzalloc(sizeof(*div), GFP_KERNEL);
0204 if (!div)
0205 goto err_div;
0206
0207 div->flags = div_flags;
0208 div->reg = base + muxdiv_offset;
0209 div->shift = div_shift;
0210 div->width = div_width;
0211 div->lock = lock;
0212 div_ops = &clk_half_divider_ops;
0213 }
0214
0215 hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
0216 mux ? &mux->hw : NULL, mux_ops,
0217 div ? &div->hw : NULL, div_ops,
0218 gate ? &gate->hw : NULL, gate_ops,
0219 flags);
0220 if (IS_ERR(hw))
0221 goto err_div;
0222
0223 return hw->clk;
0224 err_div:
0225 kfree(gate);
0226 err_gate:
0227 kfree(mux);
0228 return ERR_CAST(hw);
0229 }