#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/slab.h>

#include "clk.h"

struct clk_divider_gate {
	struct clk_divider divider;
	u32 cached_val;	/* divider value preserved while the clock is gated */
};

static inline struct clk_divider_gate *to_clk_divider_gate(struct clk_hw *hw)
{
	struct clk_divider *div = to_clk_divider(hw);

	return container_of(div, struct clk_divider_gate, divider);
}

static unsigned long clk_divider_gate_recalc_rate_ro(struct clk_hw *hw,
						     unsigned long parent_rate)
{
	struct clk_divider *div = to_clk_divider(hw);
	unsigned int val;

	val = readl(div->reg) >> div->shift;
	val &= clk_div_mask(div->width);
	if (!val)
		return 0;

	return divider_recalc_rate(hw, parent_rate, val, div->table,
				   div->flags, div->width);
}

static unsigned long clk_divider_gate_recalc_rate(struct clk_hw *hw,
						  unsigned long parent_rate)
{
	struct clk_divider_gate *div_gate = to_clk_divider_gate(hw);
	struct clk_divider *div = to_clk_divider(hw);
	unsigned long flags;
	unsigned int val;

	spin_lock_irqsave(div->lock, flags);

	if (!clk_hw_is_enabled(hw)) {
		val = div_gate->cached_val;
	} else {
		val = readl(div->reg) >> div->shift;
		val &= clk_div_mask(div->width);
	}

	spin_unlock_irqrestore(div->lock, flags);

	if (!val)
		return 0;

	return divider_recalc_rate(hw, parent_rate, val, div->table,
				   div->flags, div->width);
}

static int clk_divider_determine_rate(struct clk_hw *hw,
				      struct clk_rate_request *req)
{
	/* rate selection is delegated to the generic divider implementation */
	return clk_divider_ops.determine_rate(hw, req);
}

static int clk_divider_gate_set_rate(struct clk_hw *hw, unsigned long rate,
				     unsigned long parent_rate)
{
	struct clk_divider_gate *div_gate = to_clk_divider_gate(hw);
	struct clk_divider *div = to_clk_divider(hw);
	unsigned long flags;
	int value;
	u32 val;

	value = divider_get_val(rate, parent_rate, div->table,
				div->width, div->flags);
	if (value < 0)
		return value;

	spin_lock_irqsave(div->lock, flags);

	if (clk_hw_is_enabled(hw)) {
		val = readl(div->reg);
		val &= ~(clk_div_mask(div->width) << div->shift);
		val |= (u32)value << div->shift;
		writel(val, div->reg);
	} else {
		div_gate->cached_val = value;
	}

	spin_unlock_irqrestore(div->lock, flags);

	return 0;
}

static int clk_divider_enable(struct clk_hw *hw)
{
	struct clk_divider_gate *div_gate = to_clk_divider_gate(hw);
	struct clk_divider *div = to_clk_divider(hw);
	unsigned long flags;
	u32 val;

	if (!div_gate->cached_val) {
		pr_err("%s: no valid preset rate\n", clk_hw_get_name(hw));
		return -EINVAL;
	}

	spin_lock_irqsave(div->lock, flags);

	/* restore the divider value cached while the clock was gated */
	val = readl(div->reg);
	val |= div_gate->cached_val << div->shift;
	writel(val, div->reg);

	spin_unlock_irqrestore(div->lock, flags);

	return 0;
}

static void clk_divider_disable(struct clk_hw *hw)
{
	struct clk_divider_gate *div_gate = to_clk_divider_gate(hw);
	struct clk_divider *div = to_clk_divider(hw);
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(div->lock, flags);

	/* save the current divider value, then gate by clearing the register */
	val = readl(div->reg) >> div->shift;
	val &= clk_div_mask(div->width);
	div_gate->cached_val = val;
	writel(0, div->reg);

	spin_unlock_irqrestore(div->lock, flags);
}

static int clk_divider_is_enabled(struct clk_hw *hw)
{
	struct clk_divider *div = to_clk_divider(hw);
	u32 val;

	val = readl(div->reg) >> div->shift;
	val &= clk_div_mask(div->width);

	return val ? 1 : 0;
}

static const struct clk_ops clk_divider_gate_ro_ops = {
	.recalc_rate = clk_divider_gate_recalc_rate_ro,
	.determine_rate = clk_divider_determine_rate,
};

static const struct clk_ops clk_divider_gate_ops = {
	.recalc_rate = clk_divider_gate_recalc_rate,
	.determine_rate = clk_divider_determine_rate,
	.set_rate = clk_divider_gate_set_rate,
	.enable = clk_divider_enable,
	.disable = clk_divider_disable,
	.is_enabled = clk_divider_is_enabled,
};
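
/*
 * imx_clk_hw_divider_gate() registers a divider whose register field also
 * acts as a gate: a zero divider value gates the clock, and the divider
 * value is cached so it can be written back when the clock is re-enabled.
 * The divider is always CLK_DIVIDER_ONE_BASED; passing CLK_DIVIDER_READ_ONLY
 * in clk_divider_flags selects the read-only ops.
 */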
struct clk_hw *imx_clk_hw_divider_gate(const char *name, const char *parent_name,
				       unsigned long flags, void __iomem *reg,
				       u8 shift, u8 width, u8 clk_divider_flags,
				       const struct clk_div_table *table,
				       spinlock_t *lock)
{
	struct clk_init_data init;
	struct clk_divider_gate *div_gate;
	struct clk_hw *hw;
	u32 val;
	int ret;

	div_gate = kzalloc(sizeof(*div_gate), GFP_KERNEL);
	if (!div_gate)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	if (clk_divider_flags & CLK_DIVIDER_READ_ONLY)
		init.ops = &clk_divider_gate_ro_ops;
	else
		init.ops = &clk_divider_gate_ops;
	init.flags = flags;
	init.parent_names = parent_name ? &parent_name : NULL;
	init.num_parents = parent_name ? 1 : 0;

	div_gate->divider.reg = reg;
	div_gate->divider.shift = shift;
	div_gate->divider.width = width;
	div_gate->divider.lock = lock;
	div_gate->divider.table = table;
	div_gate->divider.hw.init = &init;
	div_gate->divider.flags = CLK_DIVIDER_ONE_BASED | clk_divider_flags;

	/* cache the current divider value (zero means the clock is gated) */
	val = readl(reg) >> shift;
	val &= clk_div_mask(width);
	div_gate->cached_val = val;

	hw = &div_gate->divider.hw;
	ret = clk_hw_register(NULL, hw);
	if (ret) {
		kfree(div_gate);
		hw = ERR_PTR(ret);
	}

	return hw;
}
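
/*
 * Usage sketch (illustrative only): the clock names, register offset, field
 * layout and spinlock below are hypothetical, not taken from any real i.MX
 * clock tree.
 *
 *	static DEFINE_SPINLOCK(example_lock);
 *
 *	hw = imx_clk_hw_divider_gate("ahb_div", "ahb_src", CLK_SET_RATE_PARENT,
 *				     base + 0x10, 0, 3, 0, NULL, &example_lock);
 *	if (IS_ERR(hw))
 *		return PTR_ERR(hw);
 */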