/*
 * Gated clock implementation: a basic clock that can gate and ungate
 * its output via a single bit in a memory-mapped register.
 */

#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/string.h>

/*
 * DOC: basic gateable clock which can gate and ungate its output
 *
 * Traits of this clock:
 * prepare - clk_(un)prepare only ensures parent is (un)prepared
 * enable - clk_enable and clk_disable are functional & control gating
 * rate - inherits rate from parent.  No clk_set_rate support
 * parent - fixed parent.  No clk_set_parent support
 */

/* Register accessors that honour the CLK_GATE_BIG_ENDIAN flag */
static inline u32 clk_gate_readl(struct clk_gate *gate)
{
	if (gate->flags & CLK_GATE_BIG_ENDIAN)
		return ioread32be(gate->reg);

	return readl(gate->reg);
}

static inline void clk_gate_writel(struct clk_gate *gate, u32 val)
{
	if (gate->flags & CLK_GATE_BIG_ENDIAN)
		iowrite32be(val, gate->reg);
	else
		writel(val, gate->reg);
}

/*
 * The gate logic below reduces to an XOR of the requested state with the
 * CLK_GATE_SET_TO_DISABLE ("set2dis") flag:
 *
 * For enabling the clock, enable = 1
 *	set2dis = 1	-> clear bit	-> set = 0
 *	set2dis = 0	-> set bit	-> set = 1
 *
 * For disabling the clock, enable = 0
 *	set2dis = 1	-> set bit	-> set = 1
 *	set2dis = 0	-> clear bit	-> set = 0
 *
 * So the result is always: enable xor set2dis.
 */
static void clk_gate_endisable(struct clk_hw *hw, int enable)
{
	struct clk_gate *gate = to_clk_gate(hw);
	int set = gate->flags & CLK_GATE_SET_TO_DISABLE ? 1 : 0;
	unsigned long flags;
	u32 reg;

	set ^= enable;

	if (gate->lock)
		spin_lock_irqsave(gate->lock, flags);
	else
		__acquire(gate->lock);

	if (gate->flags & CLK_GATE_HIWORD_MASK) {
		/* hiword-mask registers: write-enable mask in the upper 16 bits */
		reg = BIT(gate->bit_idx + 16);
		if (set)
			reg |= BIT(gate->bit_idx);
	} else {
		reg = clk_gate_readl(gate);

		if (set)
			reg |= BIT(gate->bit_idx);
		else
			reg &= ~BIT(gate->bit_idx);
	}

	clk_gate_writel(gate, reg);

	if (gate->lock)
		spin_unlock_irqrestore(gate->lock, flags);
	else
		__release(gate->lock);
}

static int clk_gate_enable(struct clk_hw *hw)
{
	clk_gate_endisable(hw, 1);

	return 0;
}

static void clk_gate_disable(struct clk_hw *hw)
{
	clk_gate_endisable(hw, 0);
}

int clk_gate_is_enabled(struct clk_hw *hw)
{
	u32 reg;
	struct clk_gate *gate = to_clk_gate(hw);

	reg = clk_gate_readl(gate);

	/* if a set bit disables this clk, flip it before masking */
	if (gate->flags & CLK_GATE_SET_TO_DISABLE)
		reg ^= BIT(gate->bit_idx);

	reg &= BIT(gate->bit_idx);

	return reg ? 1 : 0;
}
EXPORT_SYMBOL_GPL(clk_gate_is_enabled);

const struct clk_ops clk_gate_ops = {
	.enable = clk_gate_enable,
	.disable = clk_gate_disable,
	.is_enabled = clk_gate_is_enabled,
};
EXPORT_SYMBOL_GPL(clk_gate_ops);

struct clk_hw *__clk_hw_register_gate(struct device *dev,
		struct device_node *np, const char *name,
		const char *parent_name, const struct clk_hw *parent_hw,
		const struct clk_parent_data *parent_data,
		unsigned long flags,
		void __iomem *reg, u8 bit_idx,
		u8 clk_gate_flags, spinlock_t *lock)
{
	struct clk_gate *gate;
	struct clk_hw *hw;
	struct clk_init_data init = {};
	int ret = -EINVAL;

	if (clk_gate_flags & CLK_GATE_HIWORD_MASK) {
		if (bit_idx > 15) {
			pr_err("gate bit exceeds LOWORD field\n");
			return ERR_PTR(-EINVAL);
		}
	}

	/* allocate the gate */
	gate = kzalloc(sizeof(*gate), GFP_KERNEL);
	if (!gate)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &clk_gate_ops;
	init.flags = flags;
	init.parent_names = parent_name ? &parent_name : NULL;
	init.parent_hws = parent_hw ? &parent_hw : NULL;
	init.parent_data = parent_data;
	if (parent_name || parent_hw || parent_data)
		init.num_parents = 1;
	else
		init.num_parents = 0;

	/* struct clk_gate assignments */
	gate->reg = reg;
	gate->bit_idx = bit_idx;
	gate->flags = clk_gate_flags;
	gate->lock = lock;
	gate->hw.init = &init;

	hw = &gate->hw;
	if (dev || !np)
		ret = clk_hw_register(dev, hw);
	else if (np)
		ret = of_clk_hw_register(np, hw);
	if (ret) {
		kfree(gate);
		hw = ERR_PTR(ret);
	}

	return hw;
}
EXPORT_SYMBOL_GPL(__clk_hw_register_gate);
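
/*
 * Illustrative usage sketch (not part of this file): a clock provider
 * driver would normally go through the clk_hw_register_gate() wrapper,
 * as clk_register_gate() below does, rather than calling
 * __clk_hw_register_gate() directly.  The names, register offset and
 * bit index here are hypothetical.
 */
#if 0	/* usage sketch only */
static DEFINE_SPINLOCK(example_gate_lock);

static int example_register_gate(struct device *dev, void __iomem *base)
{
	struct clk_hw *hw;

	/* gate controlled by bit 3 of the register at base + 0x10 */
	hw = clk_hw_register_gate(dev, "example_gate", "example_parent", 0,
				  base + 0x10, 3, 0, &example_gate_lock);
	if (IS_ERR(hw))
		return PTR_ERR(hw);

	return 0;
}
#endif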

/*
 * clk_register_gate - register a gate clock with the clock framework and
 * return a struct clk, built on the clk_hw based registration above.
 */
struct clk *clk_register_gate(struct device *dev, const char *name,
		const char *parent_name, unsigned long flags,
		void __iomem *reg, u8 bit_idx,
		u8 clk_gate_flags, spinlock_t *lock)
{
	struct clk_hw *hw;

	hw = clk_hw_register_gate(dev, name, parent_name, flags, reg,
				  bit_idx, clk_gate_flags, lock);
	if (IS_ERR(hw))
		return ERR_CAST(hw);
	return hw->clk;
}
EXPORT_SYMBOL_GPL(clk_register_gate);

void clk_unregister_gate(struct clk *clk)
{
	struct clk_gate *gate;
	struct clk_hw *hw;

	hw = __clk_get_hw(clk);
	if (!hw)
		return;

	gate = to_clk_gate(hw);

	clk_unregister(clk);
	kfree(gate);
}
EXPORT_SYMBOL_GPL(clk_unregister_gate);

void clk_hw_unregister_gate(struct clk_hw *hw)
{
	struct clk_gate *gate;

	gate = to_clk_gate(hw);

	clk_hw_unregister(hw);
	kfree(gate);
}
EXPORT_SYMBOL_GPL(clk_hw_unregister_gate);

/* devres release callback: unregister the gate when the device is released */
static void devm_clk_hw_release_gate(struct device *dev, void *res)
{
	clk_hw_unregister_gate(*(struct clk_hw **)res);
}

struct clk_hw *__devm_clk_hw_register_gate(struct device *dev,
		struct device_node *np, const char *name,
		const char *parent_name, const struct clk_hw *parent_hw,
		const struct clk_parent_data *parent_data,
		unsigned long flags,
		void __iomem *reg, u8 bit_idx,
		u8 clk_gate_flags, spinlock_t *lock)
{
	struct clk_hw **ptr, *hw;

	ptr = devres_alloc(devm_clk_hw_release_gate, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	hw = __clk_hw_register_gate(dev, np, name, parent_name, parent_hw,
				    parent_data, flags, reg, bit_idx,
				    clk_gate_flags, lock);

	if (!IS_ERR(hw)) {
		*ptr = hw;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return hw;
}
EXPORT_SYMBOL_GPL(__devm_clk_hw_register_gate);
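
/*
 * Illustrative sketch (not part of this file): drivers would typically
 * reach this through the devm_clk_hw_register_gate() wrapper declared in
 * <linux/clk-provider.h> (assumed here), so the gate is unregistered
 * automatically when the device is released.  Names and register details
 * below are hypothetical.
 */
#if 0	/* usage sketch only */
static int example_devm_register_gate(struct device *dev, void __iomem *base)
{
	struct clk_hw *hw;

	/* same hypothetical gate as above, with no shared register lock */
	hw = devm_clk_hw_register_gate(dev, "example_gate", "example_parent",
				       0, base + 0x10, 3, 0, NULL);

	return PTR_ERR_OR_ZERO(hw);
}
#endif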