// SPDX-License-Identifier: GPL-2.0
/*
 * Xilinx 'Clocking Wizard' driver
 *
 * Sören Brinkmann <soren.brinkmann@xilinx.com>
 */
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/iopoll.h>

#define WZRD_NUM_OUTPUTS 7
#define WZRD_ACLK_MAX_FREQ 250000000UL

#define WZRD_CLK_CFG_REG(n) (0x200 + 4 * (n))

#define WZRD_CLKOUT0_FRAC_EN BIT(18)
#define WZRD_CLKFBOUT_FRAC_EN BIT(26)

#define WZRD_CLKFBOUT_MULT_SHIFT 8
#define WZRD_CLKFBOUT_MULT_MASK (0xff << WZRD_CLKFBOUT_MULT_SHIFT)
#define WZRD_CLKFBOUT_FRAC_SHIFT 16
#define WZRD_CLKFBOUT_FRAC_MASK (0x3ff << WZRD_CLKFBOUT_FRAC_SHIFT)
#define WZRD_DIVCLK_DIVIDE_SHIFT 0
#define WZRD_DIVCLK_DIVIDE_MASK (0xff << WZRD_DIVCLK_DIVIDE_SHIFT)
#define WZRD_CLKOUT_DIVIDE_SHIFT 0
#define WZRD_CLKOUT_DIVIDE_WIDTH 8
#define WZRD_CLKOUT_DIVIDE_MASK (0xff << WZRD_CLKOUT_DIVIDE_SHIFT)
#define WZRD_CLKOUT_FRAC_SHIFT 8
#define WZRD_CLKOUT_FRAC_MASK 0x3ff

#define WZRD_DR_MAX_INT_DIV_VALUE 255
#define WZRD_DR_STATUS_REG_OFFSET 0x04
#define WZRD_DR_LOCK_BIT_MASK 0x00000001
#define WZRD_DR_INIT_REG_OFFSET 0x25C
#define WZRD_DR_DIV_TO_PHASE_OFFSET 4
#define WZRD_DR_BEGIN_DYNA_RECONF 0x03

#define WZRD_USEC_POLL 10
#define WZRD_TIMEOUT_POLL 1000

#define div_mask(width) ((1 << (width)) - 1)

#define to_clk_wzrd_divider(_hw) container_of(_hw, struct clk_wzrd_divider, hw)

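/**
 * enum clk_wzrd_int_clks - Clocking wizard internal clocks
 * @wzrd_clk_mul:	multiplied (feedback) clock
 * @wzrd_clk_mul_div:	multiplied and divided clock
 * @wzrd_clk_mul_frac:	multiplied clock with fractional part
 * @wzrd_clk_int_max:	number of internal clocks
 */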
enum clk_wzrd_int_clks {
	wzrd_clk_mul,
	wzrd_clk_mul_div,
	wzrd_clk_mul_frac,
	wzrd_clk_int_max
};
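/**
 * struct clk_wzrd - Clocking wizard private data
 * @clk_data:		clock data for the clock provider
 * @nb:			notifier block used for rate-change checks
 * @base:		memory-mapped register base
 * @clk_in1:		handle to input clock 'clk_in1'
 * @axi_clk:		handle to input clock 's_axi_aclk'
 * @clks_internal:	internally generated clocks
 * @clkout:		output clocks
 * @speed_grade:	device speed grade (1-3), 0 if not specified
 * @suspended:		flag indicating the device is suspended
 */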
struct clk_wzrd {
	struct clk_onecell_data clk_data;
	struct notifier_block nb;
	void __iomem *base;
	struct clk *clk_in1;
	struct clk *axi_clk;
	struct clk *clks_internal[wzrd_clk_int_max];
	struct clk *clkout[WZRD_NUM_OUTPUTS];
	unsigned int speed_grade;
	bool suspended;
};
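/**
 * struct clk_wzrd_divider - clock divider specific to clk_wzrd
 * @hw:		handle between common and hardware-specific interfaces
 * @base:	base address of the register containing the divider
 * @offset:	offset of the register containing the divider
 * @shift:	shift to the divider bit field
 * @width:	width of the divider bit field
 * @flags:	clk_wzrd divider flags
 * @table:	array of value/divider pairs, last entry should have div = 0
 * @lock:	register lock
 */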
struct clk_wzrd_divider {
	struct clk_hw hw;
	void __iomem *base;
	u16 offset;
	u8 shift;
	u8 width;
	u8 flags;
	const struct clk_div_table *table;
	spinlock_t *lock;
};

#define to_clk_wzrd(_nb) container_of(_nb, struct clk_wzrd, nb)

/* Maximum 'clk_in1' rate (Hz) for speed grades 1, 2 and 3 */
static const unsigned long clk_wzrd_max_freq[] = {
	800000000UL,
	933000000UL,
	1066000000UL
};

/* Serializes accesses to the dynamic-reconfiguration registers */
static DEFINE_SPINLOCK(clkwzrd_lock);

static unsigned long clk_wzrd_recalc_rate(struct clk_hw *hw,
					  unsigned long parent_rate)
{
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	void __iomem *div_addr = divider->base + divider->offset;
	unsigned int val;

	val = readl(div_addr) >> divider->shift;
	val &= div_mask(divider->width);

	return divider_recalc_rate(hw, parent_rate, val, divider->table,
				   divider->flags, divider->width);
}

static int clk_wzrd_dynamic_reconfig(struct clk_hw *hw, unsigned long rate,
				     unsigned long parent_rate)
{
	int err;
	u32 value;
	unsigned long flags = 0;
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	void __iomem *div_addr = divider->base + divider->offset;

	if (divider->lock)
		spin_lock_irqsave(divider->lock, flags);
	else
		__acquire(divider->lock);

	value = DIV_ROUND_CLOSEST(parent_rate, rate);

	/* Cap the divider at the maximum the register field supports */
	value = min_t(u32, value, WZRD_DR_MAX_INT_DIV_VALUE);

	/* Set divisor and clear phase offset */
	writel(value, div_addr);
	writel(0x00, div_addr + WZRD_DR_DIV_TO_PHASE_OFFSET);

	/* Check status register */
	err = readl_poll_timeout(divider->base + WZRD_DR_STATUS_REG_OFFSET,
				 value, value & WZRD_DR_LOCK_BIT_MASK,
				 WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
	if (err)
		goto err_reconfig;

	/* Initiate reconfiguration */
	writel(WZRD_DR_BEGIN_DYNA_RECONF,
	       divider->base + WZRD_DR_INIT_REG_OFFSET);

	/* Wait for the reconfiguration to complete (lock bit set again) */
	err = readl_poll_timeout(divider->base + WZRD_DR_STATUS_REG_OFFSET,
				 value, value & WZRD_DR_LOCK_BIT_MASK,
				 WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
err_reconfig:
	if (divider->lock)
		spin_unlock_irqrestore(divider->lock, flags);
	else
		__release(divider->lock);
	return err;
}

static long clk_wzrd_round_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long *prate)
{
	u8 div;

	/*
	 * Since the parent rate is not changed, round the requested rate to
	 * the closest achievable integer division of the parent.
	 */
	div = DIV_ROUND_CLOSEST(*prate, rate);

	return *prate / div;
}

static const struct clk_ops clk_wzrd_clk_divider_ops = {
	.round_rate = clk_wzrd_round_rate,
	.set_rate = clk_wzrd_dynamic_reconfig,
	.recalc_rate = clk_wzrd_recalc_rate,
};

static unsigned long clk_wzrd_recalc_ratef(struct clk_hw *hw,
					   unsigned long parent_rate)
{
	unsigned int val;
	u32 div, frac;
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	void __iomem *div_addr = divider->base + divider->offset;

	val = readl(div_addr);
	div = val & div_mask(divider->width);
	frac = (val >> WZRD_CLKOUT_FRAC_SHIFT) & WZRD_CLKOUT_FRAC_MASK;

	/* Output rate = parent_rate / (div + frac/1000) */
	return mult_frac(parent_rate, 1000, (div * 1000) + frac);
}

static int clk_wzrd_dynamic_reconfig_f(struct clk_hw *hw, unsigned long rate,
				       unsigned long parent_rate)
{
	int err;
	u32 value, pre;
	unsigned long rate_div, f, clockout0_div;
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	void __iomem *div_addr = divider->base + divider->offset;

	/* Split the divider into integer and fractional (1/1000) parts */
	rate_div = ((parent_rate * 1000) / rate);
	clockout0_div = rate_div / 1000;

	pre = DIV_ROUND_CLOSEST((parent_rate * 1000), rate);
	f = (u32)(pre - (clockout0_div * 1000));
	f = f & WZRD_CLKOUT_FRAC_MASK;
	f = f << WZRD_CLKOUT_DIVIDE_WIDTH;

	value = (f | (clockout0_div & WZRD_CLKOUT_DIVIDE_MASK));

	/* Set divisor and clear phase offset */
	writel(value, div_addr);
	writel(0x0, div_addr + WZRD_DR_DIV_TO_PHASE_OFFSET);

	/* Check status register */
	err = readl_poll_timeout(divider->base + WZRD_DR_STATUS_REG_OFFSET, value,
				 value & WZRD_DR_LOCK_BIT_MASK,
				 WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
	if (err)
		return err;

	/* Initiate reconfiguration */
	writel(WZRD_DR_BEGIN_DYNA_RECONF,
	       divider->base + WZRD_DR_INIT_REG_OFFSET);

	/* Wait for the reconfiguration to complete */
	return readl_poll_timeout(divider->base + WZRD_DR_STATUS_REG_OFFSET, value,
				  value & WZRD_DR_LOCK_BIT_MASK,
				  WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
}

static long clk_wzrd_round_rate_f(struct clk_hw *hw, unsigned long rate,
				  unsigned long *prate)
{
	/* The fractional divider can match the requested rate closely */
	return rate;
}

static const struct clk_ops clk_wzrd_clk_divider_ops_f = {
	.round_rate = clk_wzrd_round_rate_f,
	.set_rate = clk_wzrd_dynamic_reconfig_f,
	.recalc_rate = clk_wzrd_recalc_ratef,
};

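/**
 * clk_wzrd_register_divf - register a dividing clock with fractional support
 * @dev:		device registering this clock
 * @name:		name of this clock
 * @parent_name:	name of the parent clock
 * @flags:		framework-specific flags
 * @base:		base address of the register containing the divider
 * @offset:		offset of the register containing the divider
 * @shift:		shift to the divider bit field
 * @width:		width of the divider bit field
 * @clk_divider_flags:	divider-specific flags
 * @table:		array of value/divider pairs, last entry should have div = 0
 * @lock:		shared register lock
 *
 * Return: handle to the registered clock or an ERR_PTR() on failure
 */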
static struct clk *clk_wzrd_register_divf(struct device *dev,
					  const char *name,
					  const char *parent_name,
					  unsigned long flags,
					  void __iomem *base, u16 offset,
					  u8 shift, u8 width,
					  u8 clk_divider_flags,
					  const struct clk_div_table *table,
					  spinlock_t *lock)
{
	struct clk_wzrd_divider *div;
	struct clk_hw *hw;
	struct clk_init_data init;
	int ret;

	div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL);
	if (!div)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &clk_wzrd_clk_divider_ops_f;
	init.flags = flags;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	div->base = base;
	div->offset = offset;
	div->shift = shift;
	div->width = width;
	div->flags = clk_divider_flags;
	div->lock = lock;
	div->hw.init = &init;
	div->table = table;

	hw = &div->hw;
	ret = devm_clk_hw_register(dev, hw);
	if (ret)
		return ERR_PTR(ret);

	return hw->clk;
}

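/**
 * clk_wzrd_register_divider - register an adjustable-divider clock
 * @dev:		device registering this clock
 * @name:		name of this clock
 * @parent_name:	name of the parent clock
 * @flags:		framework-specific flags
 * @base:		base address of the register containing the divider
 * @offset:		offset of the register containing the divider
 * @shift:		shift to the divider bit field
 * @width:		width of the divider bit field
 * @clk_divider_flags:	divider-specific flags
 * @table:		array of value/divider pairs, last entry should have div = 0
 * @lock:		shared register lock
 *
 * Return: handle to the registered clock or an ERR_PTR() on failure
 */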
static struct clk *clk_wzrd_register_divider(struct device *dev,
					     const char *name,
					     const char *parent_name,
					     unsigned long flags,
					     void __iomem *base, u16 offset,
					     u8 shift, u8 width,
					     u8 clk_divider_flags,
					     const struct clk_div_table *table,
					     spinlock_t *lock)
{
	struct clk_wzrd_divider *div;
	struct clk_hw *hw;
	struct clk_init_data init;
	int ret;

	div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL);
	if (!div)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &clk_wzrd_clk_divider_ops;
	init.flags = flags;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	div->base = base;
	div->offset = offset;
	div->shift = shift;
	div->width = width;
	div->flags = clk_divider_flags;
	div->lock = lock;
	div->hw.init = &init;
	div->table = table;

	hw = &div->hw;
	ret = devm_clk_hw_register(dev, hw);
	if (ret)
		return ERR_PTR(ret);

	return hw->clk;
}

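/**
 * clk_wzrd_clk_notifier - handle rate-change notifications for input clocks
 * @nb:		pointer to the notifier block
 * @event:	notification reason
 * @data:	pointer to the clk_notifier_data object
 *
 * Reject PRE_RATE_CHANGE events that would push 'clk_in1' or 's_axi_aclk'
 * above their maximum supported frequency.
 *
 * Return: NOTIFY_OK, NOTIFY_DONE or NOTIFY_BAD
 */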
static int clk_wzrd_clk_notifier(struct notifier_block *nb, unsigned long event,
				 void *data)
{
	unsigned long max;
	struct clk_notifier_data *ndata = data;
	struct clk_wzrd *clk_wzrd = to_clk_wzrd(nb);

	if (clk_wzrd->suspended)
		return NOTIFY_OK;

	if (ndata->clk == clk_wzrd->clk_in1)
		max = clk_wzrd_max_freq[clk_wzrd->speed_grade - 1];
	else if (ndata->clk == clk_wzrd->axi_clk)
		max = WZRD_ACLK_MAX_FREQ;
	else
		return NOTIFY_DONE;

	switch (event) {
	case PRE_RATE_CHANGE:
		if (ndata->new_rate > max)
			return NOTIFY_BAD;
		return NOTIFY_OK;
	case POST_RATE_CHANGE:
	case ABORT_RATE_CHANGE:
	default:
		return NOTIFY_DONE;
	}
}

static int __maybe_unused clk_wzrd_suspend(struct device *dev)
{
	struct clk_wzrd *clk_wzrd = dev_get_drvdata(dev);

	clk_disable_unprepare(clk_wzrd->axi_clk);
	clk_wzrd->suspended = true;

	return 0;
}

static int __maybe_unused clk_wzrd_resume(struct device *dev)
{
	int ret;
	struct clk_wzrd *clk_wzrd = dev_get_drvdata(dev);

	ret = clk_prepare_enable(clk_wzrd->axi_clk);
	if (ret) {
		dev_err(dev, "unable to enable s_axi_aclk\n");
		return ret;
	}

	clk_wzrd->suspended = false;

	return 0;
}

static SIMPLE_DEV_PM_OPS(clk_wzrd_dev_pm_ops, clk_wzrd_suspend,
			 clk_wzrd_resume);

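/**
 * clk_wzrd_probe - probe call for the clocking wizard device
 * @pdev:	handle to the platform device structure
 *
 * Return: 0 on success, negative error code otherwise
 */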
static int clk_wzrd_probe(struct platform_device *pdev)
{
	int i, ret;
	u32 reg, reg_f, mult;
	unsigned long rate;
	const char *clk_name;
	void __iomem *ctrl_reg;
	struct clk_wzrd *clk_wzrd;
	struct device_node *np = pdev->dev.of_node;
	int nr_outputs;
	unsigned long flags = 0;

	clk_wzrd = devm_kzalloc(&pdev->dev, sizeof(*clk_wzrd), GFP_KERNEL);
	if (!clk_wzrd)
		return -ENOMEM;
	platform_set_drvdata(pdev, clk_wzrd);

	clk_wzrd->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(clk_wzrd->base))
		return PTR_ERR(clk_wzrd->base);

	ret = of_property_read_u32(np, "xlnx,speed-grade", &clk_wzrd->speed_grade);
	if (!ret) {
		if (clk_wzrd->speed_grade < 1 || clk_wzrd->speed_grade > 3) {
			dev_warn(&pdev->dev, "invalid speed grade '%d'\n",
				 clk_wzrd->speed_grade);
			clk_wzrd->speed_grade = 0;
		}
	}

	clk_wzrd->clk_in1 = devm_clk_get(&pdev->dev, "clk_in1");
	if (IS_ERR(clk_wzrd->clk_in1)) {
		if (clk_wzrd->clk_in1 != ERR_PTR(-EPROBE_DEFER))
			dev_err(&pdev->dev, "clk_in1 not found\n");
		return PTR_ERR(clk_wzrd->clk_in1);
	}

	clk_wzrd->axi_clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
	if (IS_ERR(clk_wzrd->axi_clk)) {
		if (clk_wzrd->axi_clk != ERR_PTR(-EPROBE_DEFER))
			dev_err(&pdev->dev, "s_axi_aclk not found\n");
		return PTR_ERR(clk_wzrd->axi_clk);
	}
	ret = clk_prepare_enable(clk_wzrd->axi_clk);
	if (ret) {
		dev_err(&pdev->dev, "enabling s_axi_aclk failed\n");
		return ret;
	}
	rate = clk_get_rate(clk_wzrd->axi_clk);
	if (rate > WZRD_ACLK_MAX_FREQ) {
		dev_err(&pdev->dev, "s_axi_aclk frequency (%lu) too high\n",
			rate);
		ret = -EINVAL;
		goto err_disable_clk;
	}

	reg = readl(clk_wzrd->base + WZRD_CLK_CFG_REG(0));
	reg_f = reg & WZRD_CLKFBOUT_FRAC_MASK;
	reg_f = reg_f >> WZRD_CLKFBOUT_FRAC_SHIFT;

	reg = reg & WZRD_CLKFBOUT_MULT_MASK;
	reg = reg >> WZRD_CLKFBOUT_MULT_SHIFT;
	/* Feedback multiplier, expressed in thousandths (integer.frac) */
	mult = (reg * 1000) + reg_f;
	clk_name = kasprintf(GFP_KERNEL, "%s_mul", dev_name(&pdev->dev));
	if (!clk_name) {
		ret = -ENOMEM;
		goto err_disable_clk;
	}

	ret = of_property_read_u32(np, "nr-outputs", &nr_outputs);
	if (ret || nr_outputs > WZRD_NUM_OUTPUTS) {
		ret = -EINVAL;
		goto err_disable_clk;
	}
	if (nr_outputs == 1)
		flags = CLK_SET_RATE_PARENT;

	clk_wzrd->clks_internal[wzrd_clk_mul] = clk_register_fixed_factor
			(&pdev->dev, clk_name,
			 __clk_get_name(clk_wzrd->clk_in1),
			 0, mult, 1000);
	if (IS_ERR(clk_wzrd->clks_internal[wzrd_clk_mul])) {
		dev_err(&pdev->dev, "unable to register fixed-factor clock\n");
		ret = PTR_ERR(clk_wzrd->clks_internal[wzrd_clk_mul]);
		goto err_disable_clk;
	}

	clk_name = kasprintf(GFP_KERNEL, "%s_mul_div", dev_name(&pdev->dev));
	if (!clk_name) {
		ret = -ENOMEM;
		goto err_rm_int_clk;
	}

	ctrl_reg = clk_wzrd->base + WZRD_CLK_CFG_REG(0);

	clk_wzrd->clks_internal[wzrd_clk_mul_div] = clk_register_divider
			(&pdev->dev, clk_name,
			 __clk_get_name(clk_wzrd->clks_internal[wzrd_clk_mul]),
			 flags, ctrl_reg, 0, 8, CLK_DIVIDER_ONE_BASED |
			 CLK_DIVIDER_ALLOW_ZERO, &clkwzrd_lock);
	if (IS_ERR(clk_wzrd->clks_internal[wzrd_clk_mul_div])) {
		dev_err(&pdev->dev, "unable to register divider clock\n");
		ret = PTR_ERR(clk_wzrd->clks_internal[wzrd_clk_mul_div]);
		goto err_rm_int_clk;
	}

	/*
	 * Register the output clocks: clkout0 gets the fractional-capable
	 * divider, the remaining outputs use the plain integer divider.
	 */
	for (i = nr_outputs - 1; i >= 0; i--) {
		const char *clkout_name;

		clkout_name = kasprintf(GFP_KERNEL, "%s_out%d", dev_name(&pdev->dev), i);
		if (!clkout_name) {
			ret = -ENOMEM;
			goto err_rm_int_clk;
		}

		if (!i)
			clk_wzrd->clkout[i] = clk_wzrd_register_divf
				(&pdev->dev, clkout_name,
				 clk_name, flags,
				 clk_wzrd->base, (WZRD_CLK_CFG_REG(2) + i * 12),
				 WZRD_CLKOUT_DIVIDE_SHIFT,
				 WZRD_CLKOUT_DIVIDE_WIDTH,
				 CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
				 NULL, &clkwzrd_lock);
		else
			clk_wzrd->clkout[i] = clk_wzrd_register_divider
				(&pdev->dev, clkout_name,
				 clk_name, 0,
				 clk_wzrd->base, (WZRD_CLK_CFG_REG(2) + i * 12),
				 WZRD_CLKOUT_DIVIDE_SHIFT,
				 WZRD_CLKOUT_DIVIDE_WIDTH,
				 CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
				 NULL, &clkwzrd_lock);
		if (IS_ERR(clk_wzrd->clkout[i])) {
			int j;

			for (j = i + 1; j < nr_outputs; j++)
				clk_unregister(clk_wzrd->clkout[j]);
			dev_err(&pdev->dev,
				"unable to register divider clock\n");
			ret = PTR_ERR(clk_wzrd->clkout[i]);
			goto err_rm_int_clks;
		}
	}

	kfree(clk_name);

	clk_wzrd->clk_data.clks = clk_wzrd->clkout;
	clk_wzrd->clk_data.clk_num = ARRAY_SIZE(clk_wzrd->clkout);
	of_clk_add_provider(np, of_clk_src_onecell_get, &clk_wzrd->clk_data);

	if (clk_wzrd->speed_grade) {
		clk_wzrd->nb.notifier_call = clk_wzrd_clk_notifier;

		ret = clk_notifier_register(clk_wzrd->clk_in1,
					    &clk_wzrd->nb);
		if (ret)
			dev_warn(&pdev->dev,
				 "unable to register clock notifier\n");

		ret = clk_notifier_register(clk_wzrd->axi_clk, &clk_wzrd->nb);
		if (ret)
			dev_warn(&pdev->dev,
				 "unable to register clock notifier\n");
	}

	return 0;

err_rm_int_clks:
	clk_unregister(clk_wzrd->clks_internal[wzrd_clk_mul_div]);
err_rm_int_clk:
	kfree(clk_name);
	clk_unregister(clk_wzrd->clks_internal[wzrd_clk_mul]);
err_disable_clk:
	clk_disable_unprepare(clk_wzrd->axi_clk);

	return ret;
}

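/**
 * clk_wzrd_remove - remove call for the clocking wizard device
 * @pdev:	handle to the platform device structure
 *
 * Return: 0 always
 */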
static int clk_wzrd_remove(struct platform_device *pdev)
{
	int i;
	struct clk_wzrd *clk_wzrd = platform_get_drvdata(pdev);

	of_clk_del_provider(pdev->dev.of_node);

	for (i = 0; i < WZRD_NUM_OUTPUTS; i++)
		clk_unregister(clk_wzrd->clkout[i]);
	for (i = 0; i < wzrd_clk_int_max; i++)
		clk_unregister(clk_wzrd->clks_internal[i]);

	if (clk_wzrd->speed_grade) {
		clk_notifier_unregister(clk_wzrd->axi_clk, &clk_wzrd->nb);
		clk_notifier_unregister(clk_wzrd->clk_in1, &clk_wzrd->nb);
	}

	clk_disable_unprepare(clk_wzrd->axi_clk);

	return 0;
}

static const struct of_device_id clk_wzrd_ids[] = {
	{ .compatible = "xlnx,clocking-wizard" },
	{ },
};
MODULE_DEVICE_TABLE(of, clk_wzrd_ids);

static struct platform_driver clk_wzrd_driver = {
	.driver = {
		.name = "clk-wizard",
		.of_match_table = clk_wzrd_ids,
		.pm = &clk_wzrd_dev_pm_ops,
	},
	.probe = clk_wzrd_probe,
	.remove = clk_wzrd_remove,
};
module_platform_driver(clk_wzrd_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Soeren Brinkmann <soren.brinkmann@xilinx.com>");
MODULE_DESCRIPTION("Driver for the Xilinx Clocking Wizard IP core");