#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/err.h>
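
/*
 * Register accessors: the mux selector field may live in a register that is
 * big-endian with respect to the CPU, so honour CLK_MUX_BIG_ENDIAN when
 * reading and writing it.
 */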
static inline u32 clk_mux_readl(struct clk_mux *mux)
{
	if (mux->flags & CLK_MUX_BIG_ENDIAN)
		return ioread32be(mux->reg);

	return readl(mux->reg);
}

static inline void clk_mux_writel(struct clk_mux *mux, u32 val)
{
	if (mux->flags & CLK_MUX_BIG_ENDIAN)
		iowrite32be(val, mux->reg);
	else
		writel(val, mux->reg);
}

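/*
 * Map a raw selector value read from the register to a parent index. When a
 * translation table is supplied it is searched directly; otherwise the value
 * is decoded according to CLK_MUX_INDEX_BIT (one-hot encoding) and
 * CLK_MUX_INDEX_ONE (values start at 1 instead of 0).
 */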
int clk_mux_val_to_index(struct clk_hw *hw, const u32 *table, unsigned int flags,
			 unsigned int val)
{
	int num_parents = clk_hw_get_num_parents(hw);

	if (table) {
		int i;

		for (i = 0; i < num_parents; i++)
			if (table[i] == val)
				return i;
		return -EINVAL;
	}

	if (val && (flags & CLK_MUX_INDEX_BIT))
		val = ffs(val) - 1;

	if (val && (flags & CLK_MUX_INDEX_ONE))
		val--;

	if (val >= num_parents)
		return -EINVAL;

	return val;
}
EXPORT_SYMBOL_GPL(clk_mux_val_to_index);

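/*
 * Inverse of clk_mux_val_to_index(): convert a parent index into the raw
 * value to program into the register, either via the translation table or
 * the CLK_MUX_INDEX_BIT / CLK_MUX_INDEX_ONE encodings.
 */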
unsigned int clk_mux_index_to_val(const u32 *table, unsigned int flags, u8 index)
{
	unsigned int val = index;

	if (table) {
		val = table[index];
	} else {
		if (flags & CLK_MUX_INDEX_BIT)
			val = 1 << index;

		if (flags & CLK_MUX_INDEX_ONE)
			val++;
	}

	return val;
}
EXPORT_SYMBOL_GPL(clk_mux_index_to_val);

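/* Read the selector field from the register and translate it to a parent index. */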
static u8 clk_mux_get_parent(struct clk_hw *hw)
{
	struct clk_mux *mux = to_clk_mux(hw);
	u32 val;

	val = clk_mux_readl(mux) >> mux->shift;
	val &= mux->mask;

	return clk_mux_val_to_index(hw, mux->table, mux->flags, val);
}

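/*
 * Program the selector field for the requested parent. With
 * CLK_MUX_HIWORD_MASK the upper 16 bits of the write act as a write-enable
 * mask for the lower 16 bits, so no read-modify-write is needed; otherwise
 * the register is read, the field cleared and the new value OR'ed in. The
 * optional spinlock protects the read-modify-write sequence; the
 * __acquire()/__release() calls only keep sparse's context tracking balanced
 * when no lock is supplied.
 */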
static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_mux *mux = to_clk_mux(hw);
	u32 val = clk_mux_index_to_val(mux->table, mux->flags, index);
	unsigned long flags = 0;
	u32 reg;

	if (mux->lock)
		spin_lock_irqsave(mux->lock, flags);
	else
		__acquire(mux->lock);

	if (mux->flags & CLK_MUX_HIWORD_MASK) {
		reg = mux->mask << (mux->shift + 16);
	} else {
		reg = clk_mux_readl(mux);
		reg &= ~(mux->mask << mux->shift);
	}
	val = val << mux->shift;
	reg |= val;
	clk_mux_writel(mux, reg);

	if (mux->lock)
		spin_unlock_irqrestore(mux->lock, flags);
	else
		__release(mux->lock);

	return 0;
}

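/*
 * Let the framework pick the best parent for a rate request, passing along
 * the mux flags (e.g. CLK_MUX_ROUND_CLOSEST).
 */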
static int clk_mux_determine_rate(struct clk_hw *hw,
				  struct clk_rate_request *req)
{
	struct clk_mux *mux = to_clk_mux(hw);

	return clk_mux_determine_rate_flags(hw, req, mux->flags);
}

const struct clk_ops clk_mux_ops = {
	.get_parent = clk_mux_get_parent,
	.set_parent = clk_mux_set_parent,
	.determine_rate = clk_mux_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_mux_ops);

const struct clk_ops clk_mux_ro_ops = {
	.get_parent = clk_mux_get_parent,
};
EXPORT_SYMBOL_GPL(clk_mux_ro_ops);

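/*
 * Common registration helper behind the clk_hw_register_mux*() wrappers.
 * Callers describe the parents through one of parent_names, parent_hws or
 * parent_data. For CLK_MUX_HIWORD_MASK muxes the selector field must fit in
 * the low 16 bits of the register, since the high 16 bits carry the
 * write-enable mask.
 */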
struct clk_hw *__clk_hw_register_mux(struct device *dev, struct device_node *np,
		const char *name, u8 num_parents,
		const char * const *parent_names,
		const struct clk_hw **parent_hws,
		const struct clk_parent_data *parent_data,
		unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
		u8 clk_mux_flags, const u32 *table, spinlock_t *lock)
{
	struct clk_mux *mux;
	struct clk_hw *hw;
	struct clk_init_data init = {};
	int ret = -EINVAL;

	if (clk_mux_flags & CLK_MUX_HIWORD_MASK) {
		u8 width = fls(mask) - ffs(mask) + 1;

		if (width + shift > 16) {
			pr_err("mux value exceeds LOWORD field\n");
			return ERR_PTR(-EINVAL);
		}
	}

	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
	if (!mux)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	if (clk_mux_flags & CLK_MUX_READ_ONLY)
		init.ops = &clk_mux_ro_ops;
	else
		init.ops = &clk_mux_ops;
	init.flags = flags;
	init.parent_names = parent_names;
	init.parent_data = parent_data;
	init.parent_hws = parent_hws;
	init.num_parents = num_parents;

	mux->reg = reg;
	mux->shift = shift;
	mux->mask = mask;
	mux->flags = clk_mux_flags;
	mux->lock = lock;
	mux->table = table;
	mux->hw.init = &init;

	hw = &mux->hw;
	if (dev || !np)
		ret = clk_hw_register(dev, hw);
	else if (np)
		ret = of_clk_hw_register(np, hw);
	if (ret) {
		kfree(mux);
		hw = ERR_PTR(ret);
	}

	return hw;
}
EXPORT_SYMBOL_GPL(__clk_hw_register_mux);

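/* devres release callback: unregister the mux created by the devm variant. */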
static void devm_clk_hw_release_mux(struct device *dev, void *res)
{
	clk_hw_unregister_mux(*(struct clk_hw **)res);
}

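/*
 * Device-managed counterpart of __clk_hw_register_mux(): the registration is
 * recorded with devres so the mux is unregistered automatically when the
 * device is unbound.
 */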
struct clk_hw *__devm_clk_hw_register_mux(struct device *dev, struct device_node *np,
		const char *name, u8 num_parents,
		const char * const *parent_names,
		const struct clk_hw **parent_hws,
		const struct clk_parent_data *parent_data,
		unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
		u8 clk_mux_flags, const u32 *table, spinlock_t *lock)
{
	struct clk_hw **ptr, *hw;

	ptr = devres_alloc(devm_clk_hw_release_mux, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	hw = __clk_hw_register_mux(dev, np, name, num_parents, parent_names, parent_hws,
				   parent_data, flags, reg, shift, mask,
				   clk_mux_flags, table, lock);

	if (!IS_ERR(hw)) {
		*ptr = hw;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return hw;
}
EXPORT_SYMBOL_GPL(__devm_clk_hw_register_mux);

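/*
 * struct clk interface: register a mux through the clk_hw API and return the
 * associated struct clk.
 */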
struct clk *clk_register_mux_table(struct device *dev, const char *name,
		const char * const *parent_names, u8 num_parents,
		unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
		u8 clk_mux_flags, const u32 *table, spinlock_t *lock)
{
	struct clk_hw *hw;

	hw = clk_hw_register_mux_table(dev, name, parent_names,
				       num_parents, flags, reg, shift, mask,
				       clk_mux_flags, table, lock);
	if (IS_ERR(hw))
		return ERR_CAST(hw);
	return hw->clk;
}
EXPORT_SYMBOL_GPL(clk_register_mux_table);

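/* Unregister a mux registered through the struct clk API and free its clk_mux. */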
void clk_unregister_mux(struct clk *clk)
{
	struct clk_mux *mux;
	struct clk_hw *hw;

	hw = __clk_get_hw(clk);
	if (!hw)
		return;

	mux = to_clk_mux(hw);

	clk_unregister(clk);
	kfree(mux);
}
EXPORT_SYMBOL_GPL(clk_unregister_mux);

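/* clk_hw counterpart: unregister the mux and free its clk_mux container. */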
void clk_hw_unregister_mux(struct clk_hw *hw)
{
	struct clk_mux *mux;

	mux = to_clk_mux(hw);

	clk_hw_unregister(hw);
	kfree(mux);
}
EXPORT_SYMBOL_GPL(clk_hw_unregister_mux);