// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2016 Maxime Ripard
 * Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/clk-provider.h>
#include <linux/io.h>

#include "ccu_gate.h"
#include "ccu_mult.h"

struct _ccu_mult {
    unsigned long   mult, min, max;
};

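/*
 * Pick the multiplier closest to rate / parent, clamped to the
 * [min, max] range supported by the hardware field.
 */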
static void ccu_mult_find_best(unsigned long parent, unsigned long rate,
                               struct _ccu_mult *mult)
{
    int _mult;

    _mult = rate / parent;
    if (_mult < mult->min)
        _mult = mult->min;

    if (_mult > mult->max)
        _mult = mult->max;

    mult->mult = _mult;
}

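/*
 * Round-rate callback used by the mux helper: derive the valid
 * multiplier range (falling back to the full width of the register
 * field when no explicit maximum is set), pick the best factor and
 * return the resulting rate.
 */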
static unsigned long ccu_mult_round_rate(struct ccu_mux_internal *mux,
                                         struct clk_hw *parent,
                                         unsigned long *parent_rate,
                                         unsigned long rate,
                                         void *data)
{
    struct ccu_mult *cm = data;
    struct _ccu_mult _cm;

    _cm.min = cm->mult.min;

    if (cm->mult.max)
        _cm.max = cm->mult.max;
    else
        _cm.max = (1 << cm->mult.width) + cm->mult.offset - 1;

    ccu_mult_find_best(*parent_rate, rate, &_cm);

    return *parent_rate * _cm.mult;
}

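/* Gate control is delegated to the common gate helpers. */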
static void ccu_mult_disable(struct clk_hw *hw)
{
    struct ccu_mult *cm = hw_to_ccu_mult(hw);

    ccu_gate_helper_disable(&cm->common, cm->enable);
}

static int ccu_mult_enable(struct clk_hw *hw)
{
    struct ccu_mult *cm = hw_to_ccu_mult(hw);

    return ccu_gate_helper_enable(&cm->common, cm->enable);
}

static int ccu_mult_is_enabled(struct clk_hw *hw)
{
    struct ccu_mult *cm = hw_to_ccu_mult(hw);

    return ccu_gate_helper_is_enabled(&cm->common, cm->enable);
}

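/*
 * Read back the current rate: if the fractional helper is active, its
 * rate wins; otherwise apply any pre-divider on the parent and multiply
 * by the factor programmed in the register.
 */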
static unsigned long ccu_mult_recalc_rate(struct clk_hw *hw,
                                          unsigned long parent_rate)
{
    struct ccu_mult *cm = hw_to_ccu_mult(hw);
    unsigned long val;
    u32 reg;

    if (ccu_frac_helper_is_enabled(&cm->common, &cm->frac))
        return ccu_frac_helper_read_rate(&cm->common, &cm->frac);

    reg = readl(cm->common.base + cm->common.reg);
    val = reg >> cm->mult.shift;
    val &= (1 << cm->mult.width) - 1;

    parent_rate = ccu_mux_helper_apply_prediv(&cm->common, &cm->mux, -1,
                                              parent_rate);

    return parent_rate * (val + cm->mult.offset);
}

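/* Let the mux helper pick the parent, using ccu_mult_round_rate() above. */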
static int ccu_mult_determine_rate(struct clk_hw *hw,
                                   struct clk_rate_request *req)
{
    struct ccu_mult *cm = hw_to_ccu_mult(hw);

    return ccu_mux_helper_determine_rate(&cm->common, &cm->mux,
                                         req, ccu_mult_round_rate, cm);
}

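/*
 * Program a new rate: prefer the fractional helper when it can provide
 * the exact rate, otherwise compute the integer multiplier, write it to
 * the register under the CCU lock and wait for the PLL to lock.
 */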
static int ccu_mult_set_rate(struct clk_hw *hw, unsigned long rate,
                             unsigned long parent_rate)
{
    struct ccu_mult *cm = hw_to_ccu_mult(hw);
    struct _ccu_mult _cm;
    unsigned long flags;
    u32 reg;

    if (ccu_frac_helper_has_rate(&cm->common, &cm->frac, rate)) {
        ccu_frac_helper_enable(&cm->common, &cm->frac);

        return ccu_frac_helper_set_rate(&cm->common, &cm->frac,
                                        rate, cm->lock);
    } else {
        ccu_frac_helper_disable(&cm->common, &cm->frac);
    }

    parent_rate = ccu_mux_helper_apply_prediv(&cm->common, &cm->mux, -1,
                                              parent_rate);

    _cm.min = cm->mult.min;

    if (cm->mult.max)
        _cm.max = cm->mult.max;
    else
        _cm.max = (1 << cm->mult.width) + cm->mult.offset - 1;

    ccu_mult_find_best(parent_rate, rate, &_cm);

    spin_lock_irqsave(cm->common.lock, flags);

    reg = readl(cm->common.base + cm->common.reg);
    reg &= ~GENMASK(cm->mult.width + cm->mult.shift - 1, cm->mult.shift);
    reg |= ((_cm.mult - cm->mult.offset) << cm->mult.shift);

    writel(reg, cm->common.base + cm->common.reg);

    spin_unlock_irqrestore(cm->common.lock, flags);

    ccu_helper_wait_for_lock(&cm->common, cm->lock);

    return 0;
}

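/* Parent selection is delegated to the common mux helpers. */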
static u8 ccu_mult_get_parent(struct clk_hw *hw)
{
    struct ccu_mult *cm = hw_to_ccu_mult(hw);

    return ccu_mux_helper_get_parent(&cm->common, &cm->mux);
}

static int ccu_mult_set_parent(struct clk_hw *hw, u8 index)
{
    struct ccu_mult *cm = hw_to_ccu_mult(hw);

    return ccu_mux_helper_set_parent(&cm->common, &cm->mux, index);
}

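/* clk_ops wiring the callbacks above into the common clock framework. */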
const struct clk_ops ccu_mult_ops = {
    .disable    = ccu_mult_disable,
    .enable     = ccu_mult_enable,
    .is_enabled = ccu_mult_is_enabled,

    .get_parent = ccu_mult_get_parent,
    .set_parent = ccu_mult_set_parent,

    .determine_rate = ccu_mult_determine_rate,
    .recalc_rate    = ccu_mult_recalc_rate,
    .set_rate   = ccu_mult_set_rate,
};
EXPORT_SYMBOL_NS_GPL(ccu_mult_ops, SUNXI_CCU);
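
/*
 * Usage sketch (illustrative, not part of this file): a SoC CCU driver
 * would typically declare an N-type multiplier clock with the
 * SUNXI_CCU_N_WITH_GATE_LOCK() helper from ccu_mult.h, which points the
 * resulting struct ccu_mult at ccu_mult_ops. The register offset, field
 * position and bit values below are placeholders, not taken from a real
 * SoC:
 *
 *     static SUNXI_CCU_N_WITH_GATE_LOCK(pll_example_clk, "pll-example",
 *                                       "osc24M", 0x010,
 *                                       8, 5,        // N factor: shift, width
 *                                       BIT(31),     // gate bit
 *                                       BIT(28),     // lock bit
 *                                       CLK_SET_RATE_UNGATE);
 */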