// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2018-2021 NXP
 *   Dong Aisheng <aisheng.dong@nxp.com>
 */

#include <dt-bindings/firmware/imx/rsrc.h>
#include <linux/arm-smccc.h>
#include <linux/bsearch.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include "clk-scu.h"

#define IMX_SIP_CPUFREQ         0xC2000001
#define IMX_SIP_SET_CPUFREQ     0x00

static struct imx_sc_ipc *ccm_ipc_handle;
static struct device_node *pd_np;
static struct platform_driver imx_clk_scu_driver;
static const struct imx_clk_scu_rsrc_table *rsrc_table;

struct imx_scu_clk_node {
    const char *name;
    u32 rsrc;
    u8 clk_type;
    const char * const *parents;
    int num_parents;

    struct clk_hw *hw;
    struct list_head node;
};

struct list_head imx_scu_clks[IMX_SC_R_LAST];

/*
 * struct clk_scu - Description of one SCU clock
 * @hw: the common clk_hw
 * @rsrc_id: resource ID of this SCU clock
 * @clk_type: type of this clock resource
 */
struct clk_scu {
    struct clk_hw hw;
    u16 rsrc_id;
    u8 clk_type;

    /* for state save&restore */
    struct clk_hw *parent;
    u8 parent_index;
    bool is_enabled;
    u32 rate;
};

/*
 * struct clk_gpr_scu - Description of one SCU GPR clock
 * @hw: the common clk_hw
 * @rsrc_id: resource ID of this SCU clock
 * @gpr_id: GPR ID index to control the divider
 * @flags: GPR clock type (gate, divider or mux)
 * @gate_invert: invert the gate polarity (clear the GPR bit to enable)
 */
struct clk_gpr_scu {
    struct clk_hw hw;
    u16 rsrc_id;
    u8 gpr_id;
    u8 flags;
    bool gate_invert;
};

#define to_clk_gpr_scu(_hw) container_of(_hw, struct clk_gpr_scu, hw)

/*
 * struct imx_sc_msg_req_set_clock_rate - clock set rate protocol
 * @hdr: SCU protocol header
 * @rate: rate to set
 * @resource: clock resource to set rate
 * @clk: clk type of this resource
 *
 * This structure describes the SCU protocol for setting a clock rate.
 */
struct imx_sc_msg_req_set_clock_rate {
    struct imx_sc_rpc_msg hdr;
    __le32 rate;
    __le16 resource;
    u8 clk;
} __packed __aligned(4);

struct req_get_clock_rate {
    __le16 resource;
    u8 clk;
} __packed __aligned(4);

struct resp_get_clock_rate {
    __le32 rate;
};

/*
 * struct imx_sc_msg_get_clock_rate - clock get rate protocol
 * @hdr: SCU protocol header
 * @req: get rate request protocol
 * @resp: get rate response protocol
 *
 * This structure describes the SCU protocol for getting a clock rate.
 */
struct imx_sc_msg_get_clock_rate {
    struct imx_sc_rpc_msg hdr;
    union {
        struct req_get_clock_rate req;
        struct resp_get_clock_rate resp;
    } data;
};

/*
 * struct imx_sc_msg_get_clock_parent - clock get parent protocol
 * @hdr: SCU protocol header
 * @req: get parent request protocol
 * @resp: get parent response protocol
 *
 * This structure describes the SCU protocol for getting a clock parent.
 */
struct imx_sc_msg_get_clock_parent {
    struct imx_sc_rpc_msg hdr;
    union {
        struct req_get_clock_parent {
            __le16 resource;
            u8 clk;
        } __packed __aligned(4) req;
        struct resp_get_clock_parent {
            u8 parent;
        } resp;
    } data;
};

/*
 * struct imx_sc_msg_set_clock_parent - clock set parent protocol
 * @hdr: SCU protocol header
 * @resource: clock resource to set the parent for
 * @clk: clk type of this resource
 * @parent: parent index to set
 *
 * This structure describes the SCU protocol for setting a clock parent.
 */
struct imx_sc_msg_set_clock_parent {
    struct imx_sc_rpc_msg hdr;
    __le16 resource;
    u8 clk;
    u8 parent;
} __packed;

/*
 * struct imx_sc_msg_req_clock_enable - clock gate protocol
 * @hdr: SCU protocol header
 * @resource: clock resource to gate
 * @clk: clk type of this resource
 * @enable: whether to enable the clock
 * @autog: HW auto gate enable
 *
 * This structure describes the SCU protocol for clock gating.
 */
struct imx_sc_msg_req_clock_enable {
    struct imx_sc_rpc_msg hdr;
    __le16 resource;
    u8 clk;
    u8 enable;
    u8 autog;
} __packed __aligned(4);

static inline struct clk_scu *to_clk_scu(struct clk_hw *hw)
{
    return container_of(hw, struct clk_scu, hw);
}

static inline int imx_scu_clk_search_cmp(const void *rsrc, const void *rsrc_p)
{
    return *(u32 *)rsrc - *(u32 *)rsrc_p;
}

static bool imx_scu_clk_is_valid(u32 rsrc_id)
{
    void *p;

    if (!rsrc_table)
        return true;

    p = bsearch(&rsrc_id, rsrc_table->rsrc, rsrc_table->num,
            sizeof(rsrc_table->rsrc[0]), imx_scu_clk_search_cmp);

    return p != NULL;
}
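
/*
 * Note: imx_scu_clk_is_valid() uses bsearch(), so the per-SoC resource
 * table passed to imx_clk_scu_init() must keep its rsrc array sorted in
 * ascending order. A minimal sketch (array and table names below are
 * hypothetical, not part of this driver):
 *
 *    static const u32 soc_clk_scu_rsrc[] = {
 *        IMX_SC_R_UART_0,    // must stay sorted by resource ID
 *        IMX_SC_R_UART_1,
 *        IMX_SC_R_UART_2,
 *    };
 *
 *    static const struct imx_clk_scu_rsrc_table soc_clk_scu_rsrc_table = {
 *        .rsrc = soc_clk_scu_rsrc,
 *        .num = ARRAY_SIZE(soc_clk_scu_rsrc),
 *    };
 */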

int imx_clk_scu_init(struct device_node *np,
             const struct imx_clk_scu_rsrc_table *data)
{
    u32 clk_cells = 0;
    int ret, i;

    ret = imx_scu_get_handle(&ccm_ipc_handle);
    if (ret)
        return ret;

    of_property_read_u32(np, "#clock-cells", &clk_cells);

    if (clk_cells == 2) {
        for (i = 0; i < IMX_SC_R_LAST; i++)
            INIT_LIST_HEAD(&imx_scu_clks[i]);

        /* pd_np will be used to attach power domains later */
        pd_np = of_find_compatible_node(NULL, NULL, "fsl,scu-pd");
        if (!pd_np)
            return -EINVAL;

        rsrc_table = data;
    }

    return platform_driver_register(&imx_clk_scu_driver);
}
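
/*
 * Illustrative sketch (not part of this file): an SoC clock driver is
 * expected to call imx_clk_scu_init() from its probe routine, before
 * registering any SCU clocks, passing the clock controller node and an
 * optional resource table (a NULL table makes imx_scu_clk_is_valid()
 * accept every resource). The table name below is hypothetical.
 *
 *    ret = imx_clk_scu_init(pdev->dev.of_node, &soc_clk_scu_rsrc_table);
 *    if (ret)
 *        return ret;
 */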

/*
 * clk_scu_recalc_rate - Get clock rate for a SCU clock
 * @hw: clock to get rate for
 * @parent_rate: parent rate provided by common clock framework, not used
 *
 * Gets the current clock rate of a SCU clock. Returns the current
 * clock rate, or zero on failure.
 */
static unsigned long clk_scu_recalc_rate(struct clk_hw *hw,
                     unsigned long parent_rate)
{
    struct clk_scu *clk = to_clk_scu(hw);
    struct imx_sc_msg_get_clock_rate msg;
    struct imx_sc_rpc_msg *hdr = &msg.hdr;
    int ret;

    hdr->ver = IMX_SC_RPC_VERSION;
    hdr->svc = IMX_SC_RPC_SVC_PM;
    hdr->func = IMX_SC_PM_FUNC_GET_CLOCK_RATE;
    hdr->size = 2;

    msg.data.req.resource = cpu_to_le16(clk->rsrc_id);
    msg.data.req.clk = clk->clk_type;

    ret = imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
    if (ret) {
        pr_err("%s: failed to get clock rate %d\n",
               clk_hw_get_name(hw), ret);
        return 0;
    }

    return le32_to_cpu(msg.data.resp.rate);
}

/*
 * clk_scu_round_rate - Round clock rate for a SCU clock
 * @hw: clock to round rate for
 * @rate: rate to round
 * @parent_rate: parent rate provided by common clock framework, not used
 *
 * Returns the rounded clock rate.
 */
static long clk_scu_round_rate(struct clk_hw *hw, unsigned long rate,
                   unsigned long *parent_rate)
{
    /*
     * Assume all requested rates are supported and let the SCU
     * firmware handle the rest.
     */
    return rate;
}

static int clk_scu_atf_set_cpu_rate(struct clk_hw *hw, unsigned long rate,
                    unsigned long parent_rate)
{
    struct clk_scu *clk = to_clk_scu(hw);
    struct arm_smccc_res res;
    unsigned long cluster_id;

    if (clk->rsrc_id == IMX_SC_R_A35 || clk->rsrc_id == IMX_SC_R_A53)
        cluster_id = 0;
    else if (clk->rsrc_id == IMX_SC_R_A72)
        cluster_id = 1;
    else
        return -EINVAL;

    /* CPU frequency scaling can ONLY be done by ARM-Trusted-Firmware */
    arm_smccc_smc(IMX_SIP_CPUFREQ, IMX_SIP_SET_CPUFREQ,
              cluster_id, rate, 0, 0, 0, 0, &res);

    return 0;
}
0291 
0292 /*
0293  * clk_scu_set_rate - Set rate for a SCU clock
0294  * @hw: clock to change rate for
0295  * @rate: target rate for the clock
0296  * @parent_rate: rate of the clock parent, not used for SCU clocks
0297  *
0298  * Sets a clock frequency for a SCU clock. Returns the SCU
0299  * protocol status.
0300  */
0301 static int clk_scu_set_rate(struct clk_hw *hw, unsigned long rate,
0302                 unsigned long parent_rate)
0303 {
0304     struct clk_scu *clk = to_clk_scu(hw);
0305     struct imx_sc_msg_req_set_clock_rate msg;
0306     struct imx_sc_rpc_msg *hdr = &msg.hdr;
0307 
0308     hdr->ver = IMX_SC_RPC_VERSION;
0309     hdr->svc = IMX_SC_RPC_SVC_PM;
0310     hdr->func = IMX_SC_PM_FUNC_SET_CLOCK_RATE;
0311     hdr->size = 3;
0312 
0313     msg.rate = cpu_to_le32(rate);
0314     msg.resource = cpu_to_le16(clk->rsrc_id);
0315     msg.clk = clk->clk_type;
0316 
0317     return imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
0318 }
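
/*
 * For reference, the set-rate RPC above is laid out as three 32-bit words
 * (hdr->size = 3), as sized by the structs earlier in this file: word 0 is
 * the imx_sc_rpc_msg header, word 1 carries the little-endian rate, and
 * word 2 packs the 16-bit resource ID and the 8-bit clock type (the last
 * byte is padding from the __packed __aligned(4) layout). The get-rate and
 * get-parent requests only need two words, hence hdr->size = 2 there.
 */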

static u8 clk_scu_get_parent(struct clk_hw *hw)
{
    struct clk_scu *clk = to_clk_scu(hw);
    struct imx_sc_msg_get_clock_parent msg;
    struct imx_sc_rpc_msg *hdr = &msg.hdr;
    int ret;

    hdr->ver = IMX_SC_RPC_VERSION;
    hdr->svc = IMX_SC_RPC_SVC_PM;
    hdr->func = IMX_SC_PM_FUNC_GET_CLOCK_PARENT;
    hdr->size = 2;

    msg.data.req.resource = cpu_to_le16(clk->rsrc_id);
    msg.data.req.clk = clk->clk_type;

    ret = imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
    if (ret) {
        pr_err("%s: failed to get clock parent %d\n",
               clk_hw_get_name(hw), ret);
        return 0;
    }

    clk->parent_index = msg.data.resp.parent;

    return msg.data.resp.parent;
}

static int clk_scu_set_parent(struct clk_hw *hw, u8 index)
{
    struct clk_scu *clk = to_clk_scu(hw);
    struct imx_sc_msg_set_clock_parent msg;
    struct imx_sc_rpc_msg *hdr = &msg.hdr;
    int ret;

    hdr->ver = IMX_SC_RPC_VERSION;
    hdr->svc = IMX_SC_RPC_SVC_PM;
    hdr->func = IMX_SC_PM_FUNC_SET_CLOCK_PARENT;
    hdr->size = 2;

    msg.resource = cpu_to_le16(clk->rsrc_id);
    msg.clk = clk->clk_type;
    msg.parent = index;

    ret = imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
    if (ret) {
        pr_err("%s: failed to set clock parent %d\n",
               clk_hw_get_name(hw), ret);
        return ret;
    }

    clk->parent_index = index;

    return 0;
}

static int sc_pm_clock_enable(struct imx_sc_ipc *ipc, u16 resource,
                  u8 clk, bool enable, bool autog)
{
    struct imx_sc_msg_req_clock_enable msg;
    struct imx_sc_rpc_msg *hdr = &msg.hdr;

    hdr->ver = IMX_SC_RPC_VERSION;
    hdr->svc = IMX_SC_RPC_SVC_PM;
    hdr->func = IMX_SC_PM_FUNC_CLOCK_ENABLE;
    hdr->size = 3;

    msg.resource = cpu_to_le16(resource);
    msg.clk = clk;
    msg.enable = enable;
    msg.autog = autog;

    return imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
}

/*
 * clk_scu_prepare - Enable a SCU clock
 * @hw: clock to enable
 *
 * Enable the clock at the DSC slice level
 */
static int clk_scu_prepare(struct clk_hw *hw)
{
    struct clk_scu *clk = to_clk_scu(hw);

    return sc_pm_clock_enable(ccm_ipc_handle, clk->rsrc_id,
                  clk->clk_type, true, false);
}

/*
 * clk_scu_unprepare - Disable a SCU clock
 * @hw: clock to disable
 *
 * Disable the clock at the DSC slice level
 */
static void clk_scu_unprepare(struct clk_hw *hw)
{
    struct clk_scu *clk = to_clk_scu(hw);
    int ret;

    ret = sc_pm_clock_enable(ccm_ipc_handle, clk->rsrc_id,
                 clk->clk_type, false, false);
    if (ret)
        pr_warn("%s: clk unprepare failed %d\n", clk_hw_get_name(hw),
            ret);
}

static const struct clk_ops clk_scu_ops = {
    .recalc_rate = clk_scu_recalc_rate,
    .round_rate = clk_scu_round_rate,
    .set_rate = clk_scu_set_rate,
    .get_parent = clk_scu_get_parent,
    .set_parent = clk_scu_set_parent,
    .prepare = clk_scu_prepare,
    .unprepare = clk_scu_unprepare,
};

static const struct clk_ops clk_scu_cpu_ops = {
    .recalc_rate = clk_scu_recalc_rate,
    .round_rate = clk_scu_round_rate,
    .set_rate = clk_scu_atf_set_cpu_rate,
    .prepare = clk_scu_prepare,
    .unprepare = clk_scu_unprepare,
};

static const struct clk_ops clk_scu_pi_ops = {
    .recalc_rate = clk_scu_recalc_rate,
    .round_rate  = clk_scu_round_rate,
    .set_rate    = clk_scu_set_rate,
};

struct clk_hw *__imx_clk_scu(struct device *dev, const char *name,
                 const char * const *parents, int num_parents,
                 u32 rsrc_id, u8 clk_type)
{
    struct clk_init_data init;
    struct clk_scu *clk;
    struct clk_hw *hw;
    int ret;

    clk = kzalloc(sizeof(*clk), GFP_KERNEL);
    if (!clk)
        return ERR_PTR(-ENOMEM);

    clk->rsrc_id = rsrc_id;
    clk->clk_type = clk_type;

    init.name = name;
    if (rsrc_id == IMX_SC_R_A35 || rsrc_id == IMX_SC_R_A53 || rsrc_id == IMX_SC_R_A72)
        init.ops = &clk_scu_cpu_ops;
    else if (rsrc_id == IMX_SC_R_PI_0_PLL)
        init.ops = &clk_scu_pi_ops;
    else
        init.ops = &clk_scu_ops;
    init.parent_names = parents;
    init.num_parents = num_parents;

    /*
     * Note: on MX8 the clocks are tightly coupled with the power domain;
     * once the power domain is off, the clock state may be lost. Mark the
     * clock CLK_GET_RATE_NOCACHE so users retrieve the real clock state
     * from the hardware instead of a possibly stale cached rate.
     */
    init.flags = CLK_GET_RATE_NOCACHE;
    clk->hw.init = &init;

    hw = &clk->hw;
    ret = clk_hw_register(dev, hw);
    if (ret) {
        kfree(clk);
        hw = ERR_PTR(ret);
        return hw;
    }

    if (dev)
        dev_set_drvdata(dev, clk);

    return hw;
}
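
/*
 * Illustrative sketch (not part of this file): with the legacy one-cell
 * binding, an SoC clock driver typically registers SCU clocks directly
 * through this helper. The clock name below is hypothetical and
 * IMX_SC_PM_CLK_PER stands for the peripheral clock type of the SCU PM
 * service.
 *
 *    hw = __imx_clk_scu(NULL, "uart0_clk", NULL, 0,
 *                       IMX_SC_R_UART_0, IMX_SC_PM_CLK_PER);
 */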

struct clk_hw *imx_scu_of_clk_src_get(struct of_phandle_args *clkspec,
                      void *data)
{
    unsigned int rsrc = clkspec->args[0];
    unsigned int idx = clkspec->args[1];
    struct list_head *scu_clks = data;
    struct imx_scu_clk_node *clk;

    /* guard against malformed clock specifiers */
    if (rsrc >= IMX_SC_R_LAST)
        return ERR_PTR(-EINVAL);

    list_for_each_entry(clk, &scu_clks[rsrc], node) {
        if (clk->clk_type == idx)
            return clk->hw;
    }

    return ERR_PTR(-ENODEV);
}
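
/*
 * Consumers using the two-cell binding reference an SCU clock by resource
 * ID and clock type, which imx_scu_of_clk_src_get() resolves against the
 * lists filled in at probe time. A rough device-tree sketch (node names
 * and the exact binding constants shown are illustrative):
 *
 *    clk: clock-controller {
 *        compatible = "fsl,imx8qxp-clk", "fsl,scu-clk";
 *        #clock-cells = <2>;
 *    };
 *
 *    serial0: serial {
 *        clocks = <&clk IMX_SC_R_UART_0 IMX_SC_PM_CLK_PER>;
 *    };
 */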

static int imx_clk_scu_probe(struct platform_device *pdev)
{
    struct device *dev = &pdev->dev;
    struct imx_scu_clk_node *clk = dev_get_platdata(dev);
    struct clk_hw *hw;
    int ret;

    if (!((clk->rsrc == IMX_SC_R_A35) || (clk->rsrc == IMX_SC_R_A53) ||
        (clk->rsrc == IMX_SC_R_A72))) {
        pm_runtime_set_suspended(dev);
        pm_runtime_set_autosuspend_delay(dev, 50);
        pm_runtime_use_autosuspend(&pdev->dev);
        pm_runtime_enable(dev);

        ret = pm_runtime_resume_and_get(dev);
        if (ret) {
            pm_genpd_remove_device(dev);
            pm_runtime_disable(dev);
            return ret;
        }
    }

    hw = __imx_clk_scu(dev, clk->name, clk->parents, clk->num_parents,
               clk->rsrc, clk->clk_type);
    if (IS_ERR(hw)) {
        pm_runtime_disable(dev);
        return PTR_ERR(hw);
    }

    clk->hw = hw;
    list_add_tail(&clk->node, &imx_scu_clks[clk->rsrc]);

    if (!((clk->rsrc == IMX_SC_R_A35) || (clk->rsrc == IMX_SC_R_A53) ||
        (clk->rsrc == IMX_SC_R_A72))) {
        pm_runtime_mark_last_busy(&pdev->dev);
        pm_runtime_put_autosuspend(&pdev->dev);
    }

    dev_dbg(dev, "register SCU clock rsrc:%d type:%d\n", clk->rsrc,
        clk->clk_type);

    return 0;
}

static int __maybe_unused imx_clk_scu_suspend(struct device *dev)
{
    struct clk_scu *clk = dev_get_drvdata(dev);
    u32 rsrc_id = clk->rsrc_id;

    if ((rsrc_id == IMX_SC_R_A35) || (rsrc_id == IMX_SC_R_A53) ||
        (rsrc_id == IMX_SC_R_A72))
        return 0;

    clk->parent = clk_hw_get_parent(&clk->hw);

    /* DC SS needs to handle bypass clock using non-cached clock rate */
    if (clk->rsrc_id == IMX_SC_R_DC_0_VIDEO0 ||
        clk->rsrc_id == IMX_SC_R_DC_0_VIDEO1 ||
        clk->rsrc_id == IMX_SC_R_DC_1_VIDEO0 ||
        clk->rsrc_id == IMX_SC_R_DC_1_VIDEO1)
        clk->rate = clk_scu_recalc_rate(&clk->hw, 0);
    else
        clk->rate = clk_hw_get_rate(&clk->hw);
    clk->is_enabled = clk_hw_is_enabled(&clk->hw);

    if (clk->parent)
        dev_dbg(dev, "save parent %s idx %u\n", clk_hw_get_name(clk->parent),
            clk->parent_index);

    if (clk->rate)
        dev_dbg(dev, "save rate %d\n", clk->rate);

    if (clk->is_enabled)
        dev_dbg(dev, "save enabled state\n");

    return 0;
}

static int __maybe_unused imx_clk_scu_resume(struct device *dev)
{
    struct clk_scu *clk = dev_get_drvdata(dev);
    u32 rsrc_id = clk->rsrc_id;
    int ret = 0;

    if ((rsrc_id == IMX_SC_R_A35) || (rsrc_id == IMX_SC_R_A53) ||
        (rsrc_id == IMX_SC_R_A72))
        return 0;

    if (clk->parent) {
        ret = clk_scu_set_parent(&clk->hw, clk->parent_index);
        dev_dbg(dev, "restore parent %s idx %u %s\n",
            clk_hw_get_name(clk->parent),
            clk->parent_index, !ret ? "success" : "failed");
    }

    if (clk->rate) {
        ret = clk_scu_set_rate(&clk->hw, clk->rate, 0);
        dev_dbg(dev, "restore rate %d %s\n", clk->rate,
            !ret ? "success" : "failed");
    }

    if (clk->is_enabled && rsrc_id != IMX_SC_R_PI_0_PLL) {
        ret = clk_scu_prepare(&clk->hw);
        dev_dbg(dev, "restore enabled state %s\n",
            !ret ? "success" : "failed");
    }

    return ret;
}

static const struct dev_pm_ops imx_clk_scu_pm_ops = {
    SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_clk_scu_suspend,
                      imx_clk_scu_resume)
};

static struct platform_driver imx_clk_scu_driver = {
    .driver = {
        .name = "imx-scu-clk",
        .suppress_bind_attrs = true,
        .pm = &imx_clk_scu_pm_ops,
    },
    .probe = imx_clk_scu_probe,
};

static int imx_clk_scu_attach_pd(struct device *dev, u32 rsrc_id)
{
    struct of_phandle_args genpdspec = {
        .np = pd_np,
        .args_count = 1,
        .args[0] = rsrc_id,
    };

    if (rsrc_id == IMX_SC_R_A35 || rsrc_id == IMX_SC_R_A53 ||
        rsrc_id == IMX_SC_R_A72)
        return 0;

    return of_genpd_add_device(&genpdspec, dev);
}

struct clk_hw *imx_clk_scu_alloc_dev(const char *name,
                     const char * const *parents,
                     int num_parents, u32 rsrc_id, u8 clk_type)
{
    struct imx_scu_clk_node clk = {
        .name = name,
        .rsrc = rsrc_id,
        .clk_type = clk_type,
        .parents = parents,
        .num_parents = num_parents,
    };
    struct platform_device *pdev;
    int ret;

    if (!imx_scu_clk_is_valid(rsrc_id))
        return ERR_PTR(-EINVAL);

    pdev = platform_device_alloc(name, PLATFORM_DEVID_NONE);
    if (!pdev) {
        pr_err("%s: failed to allocate scu clk dev rsrc %d type %d\n",
               name, rsrc_id, clk_type);
        return ERR_PTR(-ENOMEM);
    }

    ret = platform_device_add_data(pdev, &clk, sizeof(clk));
    if (ret) {
        platform_device_put(pdev);
        return ERR_PTR(ret);
    }

    ret = driver_set_override(&pdev->dev, &pdev->driver_override,
                  "imx-scu-clk", strlen("imx-scu-clk"));
    if (ret) {
        platform_device_put(pdev);
        return ERR_PTR(ret);
    }

    ret = imx_clk_scu_attach_pd(&pdev->dev, rsrc_id);
    if (ret)
        pr_warn("%s: failed to attach the power domain %d\n",
            name, ret);

    ret = platform_device_add(pdev);
    if (ret) {
        platform_device_put(pdev);
        return ERR_PTR(ret);
    }

    /* For API backwards compatibility, simply return NULL for success */
    return NULL;
}
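
/*
 * Illustrative sketch (not part of this file): with the two-cell binding,
 * an SoC clock driver registers each SCU clock through this helper, which
 * creates a dedicated platform device bound to the imx-scu-clk driver above
 * so the clock can be attached to its SCU power domain and runtime-PM
 * managed. The clock name is hypothetical; IMX_SC_PM_CLK_PER stands for the
 * peripheral clock type of the SCU PM service.
 *
 *    imx_clk_scu_alloc_dev("uart0_clk", NULL, 0,
 *                          IMX_SC_R_UART_0, IMX_SC_PM_CLK_PER);
 */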

void imx_clk_scu_unregister(void)
{
    struct imx_scu_clk_node *clk, *tmp;
    int i;

    for (i = 0; i < IMX_SC_R_LAST; i++) {
        /* use the _safe variant since entries are freed while iterating */
        list_for_each_entry_safe(clk, tmp, &imx_scu_clks[i], node) {
            clk_hw_unregister(clk->hw);
            kfree(clk);
        }
    }
}

static unsigned long clk_gpr_div_scu_recalc_rate(struct clk_hw *hw,
                         unsigned long parent_rate)
{
    struct clk_gpr_scu *clk = to_clk_gpr_scu(hw);
    unsigned long rate = 0;
    u32 val = 0;
    int err;

    err = imx_sc_misc_get_control(ccm_ipc_handle, clk->rsrc_id,
                      clk->gpr_id, &val);

    rate = val ? parent_rate / 2 : parent_rate;

    return err ? 0 : rate;
}

static long clk_gpr_div_scu_round_rate(struct clk_hw *hw, unsigned long rate,
                   unsigned long *prate)
{
    if (rate < *prate)
        rate = *prate / 2;
    else
        rate = *prate;

    return rate;
}

static int clk_gpr_div_scu_set_rate(struct clk_hw *hw, unsigned long rate,
                    unsigned long parent_rate)
{
    struct clk_gpr_scu *clk = to_clk_gpr_scu(hw);
    u32 val;
    int err;

    val = (rate < parent_rate) ? 1 : 0;
    err = imx_sc_misc_set_control(ccm_ipc_handle, clk->rsrc_id,
                      clk->gpr_id, val);

    return err ? -EINVAL : 0;
}

static const struct clk_ops clk_gpr_div_scu_ops = {
    .recalc_rate = clk_gpr_div_scu_recalc_rate,
    .round_rate = clk_gpr_div_scu_round_rate,
    .set_rate = clk_gpr_div_scu_set_rate,
};

static u8 clk_gpr_mux_scu_get_parent(struct clk_hw *hw)
{
    struct clk_gpr_scu *clk = to_clk_gpr_scu(hw);
    u32 val = 0;

    imx_sc_misc_get_control(ccm_ipc_handle, clk->rsrc_id,
                clk->gpr_id, &val);

    return (u8)val;
}

static int clk_gpr_mux_scu_set_parent(struct clk_hw *hw, u8 index)
{
    struct clk_gpr_scu *clk = to_clk_gpr_scu(hw);

    return imx_sc_misc_set_control(ccm_ipc_handle, clk->rsrc_id,
                       clk->gpr_id, index);
}

static const struct clk_ops clk_gpr_mux_scu_ops = {
    .get_parent = clk_gpr_mux_scu_get_parent,
    .set_parent = clk_gpr_mux_scu_set_parent,
};

static int clk_gpr_gate_scu_prepare(struct clk_hw *hw)
{
    struct clk_gpr_scu *clk = to_clk_gpr_scu(hw);

    return imx_sc_misc_set_control(ccm_ipc_handle, clk->rsrc_id,
                       clk->gpr_id, !clk->gate_invert);
}

static void clk_gpr_gate_scu_unprepare(struct clk_hw *hw)
{
    struct clk_gpr_scu *clk = to_clk_gpr_scu(hw);
    int ret;

    ret = imx_sc_misc_set_control(ccm_ipc_handle, clk->rsrc_id,
                      clk->gpr_id, clk->gate_invert);
    if (ret)
        pr_err("%s: clk unprepare failed %d\n", clk_hw_get_name(hw),
               ret);
}

static int clk_gpr_gate_scu_is_prepared(struct clk_hw *hw)
{
    struct clk_gpr_scu *clk = to_clk_gpr_scu(hw);
    int ret;
    u32 val;

    ret = imx_sc_misc_get_control(ccm_ipc_handle, clk->rsrc_id,
                      clk->gpr_id, &val);
    if (ret)
        return ret;

    return clk->gate_invert ? !val : val;
}

static const struct clk_ops clk_gpr_gate_scu_ops = {
    .prepare = clk_gpr_gate_scu_prepare,
    .unprepare = clk_gpr_gate_scu_unprepare,
    .is_prepared = clk_gpr_gate_scu_is_prepared,
};

struct clk_hw *__imx_clk_gpr_scu(const char *name, const char * const *parent_name,
                 int num_parents, u32 rsrc_id, u8 gpr_id, u8 flags,
                 bool invert)
{
    struct imx_scu_clk_node *clk_node;
    struct clk_gpr_scu *clk;
    struct clk_hw *hw;
    struct clk_init_data init;
    int ret;

    if (rsrc_id >= IMX_SC_R_LAST || gpr_id >= IMX_SC_C_LAST)
        return ERR_PTR(-EINVAL);

    clk_node = kzalloc(sizeof(*clk_node), GFP_KERNEL);
    if (!clk_node)
        return ERR_PTR(-ENOMEM);

    if (!imx_scu_clk_is_valid(rsrc_id)) {
        kfree(clk_node);
        return ERR_PTR(-EINVAL);
    }

    clk = kzalloc(sizeof(*clk), GFP_KERNEL);
    if (!clk) {
        kfree(clk_node);
        return ERR_PTR(-ENOMEM);
    }

    clk->rsrc_id = rsrc_id;
    clk->gpr_id = gpr_id;
    clk->flags = flags;
    clk->gate_invert = invert;

    if (flags & IMX_SCU_GPR_CLK_GATE)
        init.ops = &clk_gpr_gate_scu_ops;

    if (flags & IMX_SCU_GPR_CLK_DIV)
        init.ops = &clk_gpr_div_scu_ops;

    if (flags & IMX_SCU_GPR_CLK_MUX)
        init.ops = &clk_gpr_mux_scu_ops;

    init.flags = 0;
    init.name = name;
    init.parent_names = parent_name;
    init.num_parents = num_parents;

    clk->hw.init = &init;

    hw = &clk->hw;
    ret = clk_hw_register(NULL, hw);
    if (ret) {
        kfree(clk);
        kfree(clk_node);
        hw = ERR_PTR(ret);
    } else {
        clk_node->hw = hw;
        clk_node->clk_type = gpr_id;
        list_add_tail(&clk_node->node, &imx_scu_clks[rsrc_id]);
    }

    return hw;
}
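
/*
 * Illustrative sketch (not part of this file): a GPR clock is registered
 * with one of the IMX_SCU_GPR_CLK_* flags, which selects the gate, divider
 * or mux ops above. The parent list and clock name are hypothetical, and
 * rsrc_id/gpr_id stand for the SCU resource and control IDs of the clock.
 *
 *    static const char * const pxl_sels[] = { "parent_a", "parent_b" };
 *
 *    hw = __imx_clk_gpr_scu("pxl_sel", pxl_sels, ARRAY_SIZE(pxl_sels),
 *                           rsrc_id, gpr_id, IMX_SCU_GPR_CLK_MUX, false);
 */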