Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-or-later
0002 /*
0003  * Copyright 2017 NXP
0004  * Copyright 2011,2016 Freescale Semiconductor, Inc.
0005  * Copyright 2011 Linaro Ltd.
0006  */
0007 
0008 #include <linux/clk.h>
0009 #include <linux/hrtimer.h>
0010 #include <linux/init.h>
0011 #include <linux/interrupt.h>
0012 #include <linux/io.h>
0013 #include <linux/module.h>
0014 #include <linux/of.h>
0015 #include <linux/of_address.h>
0016 #include <linux/of_device.h>
0017 #include <linux/perf_event.h>
0018 #include <linux/slab.h>
0019 
0020 #include "common.h"
0021 
/* MMDC register offsets (relative to mmdc_base) and bit positions */
#define MMDC_MAPSR		0x404	/* automatic power saving register */
#define BP_MMDC_MAPSR_PSD	0	/* power saving disable bit */
#define BP_MMDC_MAPSR_PSS	4	/* power saving status bit (unused in this file) */

#define MMDC_MDMISC		0x18	/* miscellaneous register */
#define BM_MMDC_MDMISC_DDR_TYPE	0x18	/* DDR type field mask */
#define BP_MMDC_MDMISC_DDR_TYPE	0x3	/* DDR type field shift */

/* Event configuration numbers; each selects one fixed-purpose counter */
#define TOTAL_CYCLES		0x0
#define BUSY_CYCLES		0x1
#define READ_ACCESSES		0x2
#define WRITE_ACCESSES		0x3
#define READ_BYTES		0x4
#define WRITE_BYTES		0x5

/* Enables, resets, freezes, overflow profiling (MADPCR0 control bits) */
#define DBG_DIS			0x0	/* profiling disabled */
#define DBG_EN			0x1	/* profiling enabled */
#define DBG_RST			0x2	/* reset all profiling counters */
#define PRF_FRZ			0x4	/* freeze the counters */
#define CYC_OVF			0x8	/* cycle counter overflow flag */
#define PROFILE_SEL		0x10	/* profiling select (needed on i.MX6QP) */

/* Profiling control registers and the six counter status registers */
#define MMDC_MADPCR0	0x410	/* profiling control 0 (enable/reset/freeze) */
#define MMDC_MADPCR1	0x414	/* profiling control 1 (AXI id filter) */
#define MMDC_MADPSR0	0x418	/* total cycles */
#define MMDC_MADPSR1	0x41C	/* busy cycles */
#define MMDC_MADPSR2	0x420	/* read accesses */
#define MMDC_MADPSR3	0x424	/* write accesses */
#define MMDC_MADPSR4	0x428	/* read bytes */
#define MMDC_MADPSR5	0x42C	/* write bytes */

#define MMDC_NUM_COUNTERS	6	/* number of fixed-purpose counters */

#define MMDC_FLAG_PROFILE_SEL	0x1	/* devtype flag: set PROFILE_SEL on enable */
#define MMDC_PRF_AXI_ID_CLEAR	0x0	/* MADPCR1 value that clears the AXI filter */

/* Map an embedded struct pmu back to its containing mmdc_pmu */
#define to_mmdc_pmu(p) container_of(p, struct mmdc_pmu, pmu)
0060 
/* DDR type latched from MMDC_MDMISC at probe time; see imx_mmdc_get_ddr_type() */
static int ddr_type;

/* Per-SoC quirk flags attached to the OF match table below */
struct fsl_mmdc_devtype_data {
	unsigned int flags;
};

/* i.MX6Q: no special flags needed to enable profiling */
static const struct fsl_mmdc_devtype_data imx6q_data = {
};

/* i.MX6QP: profiling must additionally be selected via PROFILE_SEL */
static const struct fsl_mmdc_devtype_data imx6qp_data = {
	.flags = MMDC_FLAG_PROFILE_SEL,
};

static const struct of_device_id imx_mmdc_dt_ids[] = {
	{ .compatible = "fsl,imx6q-mmdc", .data = (void *)&imx6q_data},
	{ .compatible = "fsl,imx6qp-mmdc", .data = (void *)&imx6qp_data},
	{ /* sentinel */ }
};
0079 
0080 #ifdef CONFIG_PERF_EVENTS
0081 
/* Dynamically allocated hotplug state; zero until the first instance registers it */
static enum cpuhp_state cpuhp_mmdc_state;
/* Allocates instance numbers used to build the "mmdc%d" PMU names */
static DEFINE_IDA(mmdc_ida);

/*
 * sysfs event descriptions; the event numbers match the TOTAL_CYCLES..
 * WRITE_BYTES configuration values defined above. The byte counters also
 * export unit/scale attributes so perf reports them in MB.
 */
PMU_EVENT_ATTR_STRING(total-cycles, mmdc_pmu_total_cycles, "event=0x00")
PMU_EVENT_ATTR_STRING(busy-cycles, mmdc_pmu_busy_cycles, "event=0x01")
PMU_EVENT_ATTR_STRING(read-accesses, mmdc_pmu_read_accesses, "event=0x02")
PMU_EVENT_ATTR_STRING(write-accesses, mmdc_pmu_write_accesses, "event=0x03")
PMU_EVENT_ATTR_STRING(read-bytes, mmdc_pmu_read_bytes, "event=0x04")
PMU_EVENT_ATTR_STRING(read-bytes.unit, mmdc_pmu_read_bytes_unit, "MB");
PMU_EVENT_ATTR_STRING(read-bytes.scale, mmdc_pmu_read_bytes_scale, "0.000001");
PMU_EVENT_ATTR_STRING(write-bytes, mmdc_pmu_write_bytes, "event=0x05")
PMU_EVENT_ATTR_STRING(write-bytes.unit, mmdc_pmu_write_bytes_unit, "MB");
PMU_EVENT_ATTR_STRING(write-bytes.scale, mmdc_pmu_write_bytes_scale, "0.000001");
0095 
/* Per-instance state for one MMDC perf PMU */
struct mmdc_pmu {
	struct pmu pmu;			/* embedded perf PMU; to_mmdc_pmu() maps back */
	void __iomem *mmdc_base;	/* mapped MMDC register block */
	cpumask_t cpu;			/* the single CPU that owns/reads this PMU */
	struct hrtimer hrtimer;		/* polling timer (MMDC has no overflow IRQ) */
	unsigned int active_events;	/* installed events; timer runs while > 0 */
	struct device *dev;		/* platform device, used for warnings */
	struct perf_event *mmdc_events[MMDC_NUM_COUNTERS]; /* one slot per fixed counter */
	struct hlist_node node;		/* cpuhp multi-instance linkage */
	struct fsl_mmdc_devtype_data *devtype_data; /* SoC-specific flags from OF match */
	struct clk *mmdc_ipg_clk;	/* optional ipg clock, enabled at probe */
};
0108 
/*
 * Polling period is set to one second, overflow of total-cycles (the fastest
 * increasing counter) takes ten seconds so one second is safe
 */
static unsigned int mmdc_pmu_poll_period_us = 1000000;

/* Tunable at runtime through /sys/module; readable by all, writable by root */
module_param_named(pmu_pmu_poll_period_us, mmdc_pmu_poll_period_us, uint,
		S_IRUGO | S_IWUSR);

/* Convert the polling period from microseconds to a ktime_t interval */
static ktime_t mmdc_pmu_timer_period(void)
{
	return ns_to_ktime((u64)mmdc_pmu_poll_period_us * 1000);
}
0122 
0123 static ssize_t mmdc_pmu_cpumask_show(struct device *dev,
0124         struct device_attribute *attr, char *buf)
0125 {
0126     struct mmdc_pmu *pmu_mmdc = dev_get_drvdata(dev);
0127 
0128     return cpumap_print_to_pagebuf(true, buf, &pmu_mmdc->cpu);
0129 }
0130 
static struct device_attribute mmdc_pmu_cpumask_attr =
	__ATTR(cpumask, S_IRUGO, mmdc_pmu_cpumask_show, NULL);

static struct attribute *mmdc_pmu_cpumask_attrs[] = {
	&mmdc_pmu_cpumask_attr.attr,
	NULL,
};

/* Exposes the owning CPU as the PMU's sysfs "cpumask" file */
static struct attribute_group mmdc_pmu_cpumask_attr_group = {
	.attrs = mmdc_pmu_cpumask_attrs,
};

static struct attribute *mmdc_pmu_events_attrs[] = {
	&mmdc_pmu_total_cycles.attr.attr,
	&mmdc_pmu_busy_cycles.attr.attr,
	&mmdc_pmu_read_accesses.attr.attr,
	&mmdc_pmu_write_accesses.attr.attr,
	&mmdc_pmu_read_bytes.attr.attr,
	&mmdc_pmu_read_bytes_unit.attr.attr,
	&mmdc_pmu_read_bytes_scale.attr.attr,
	&mmdc_pmu_write_bytes.attr.attr,
	&mmdc_pmu_write_bytes_unit.attr.attr,
	&mmdc_pmu_write_bytes_scale.attr.attr,
	NULL,
};

/* Named events (with unit/scale for the byte counters) under events/ */
static struct attribute_group mmdc_pmu_events_attr_group = {
	.name = "events",
	.attrs = mmdc_pmu_events_attrs,
};

/* "event" selects a counter (attr.config); "axi_id" (attr.config1) is
 * written to MADPCR1 as the AXI id filter when the event starts. */
PMU_FORMAT_ATTR(event, "config:0-63");
PMU_FORMAT_ATTR(axi_id, "config1:0-63");

static struct attribute *mmdc_pmu_format_attrs[] = {
	&format_attr_event.attr,
	&format_attr_axi_id.attr,
	NULL,
};

static struct attribute_group mmdc_pmu_format_attr_group = {
	.name = "format",
	.attrs = mmdc_pmu_format_attrs,
};

/* All sysfs groups attached to the PMU at perf_pmu_register() time */
static const struct attribute_group *attr_groups[] = {
	&mmdc_pmu_events_attr_group,
	&mmdc_pmu_format_attr_group,
	&mmdc_pmu_cpumask_attr_group,
	NULL,
};
0182 
0183 static u32 mmdc_pmu_read_counter(struct mmdc_pmu *pmu_mmdc, int cfg)
0184 {
0185     void __iomem *mmdc_base, *reg;
0186 
0187     mmdc_base = pmu_mmdc->mmdc_base;
0188 
0189     switch (cfg) {
0190     case TOTAL_CYCLES:
0191         reg = mmdc_base + MMDC_MADPSR0;
0192         break;
0193     case BUSY_CYCLES:
0194         reg = mmdc_base + MMDC_MADPSR1;
0195         break;
0196     case READ_ACCESSES:
0197         reg = mmdc_base + MMDC_MADPSR2;
0198         break;
0199     case WRITE_ACCESSES:
0200         reg = mmdc_base + MMDC_MADPSR3;
0201         break;
0202     case READ_BYTES:
0203         reg = mmdc_base + MMDC_MADPSR4;
0204         break;
0205     case WRITE_BYTES:
0206         reg = mmdc_base + MMDC_MADPSR5;
0207         break;
0208     default:
0209         return WARN_ONCE(1,
0210             "invalid configuration %d for mmdc counter", cfg);
0211     }
0212     return readl(reg);
0213 }
0214 
0215 static int mmdc_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
0216 {
0217     struct mmdc_pmu *pmu_mmdc = hlist_entry_safe(node, struct mmdc_pmu, node);
0218     int target;
0219 
0220     if (!cpumask_test_and_clear_cpu(cpu, &pmu_mmdc->cpu))
0221         return 0;
0222 
0223     target = cpumask_any_but(cpu_online_mask, cpu);
0224     if (target >= nr_cpu_ids)
0225         return 0;
0226 
0227     perf_pmu_migrate_context(&pmu_mmdc->pmu, cpu, target);
0228     cpumask_set_cpu(target, &pmu_mmdc->cpu);
0229 
0230     return 0;
0231 }
0232 
/*
 * Check one member of an event group: software events are always fine,
 * events from another PMU are not, and each MMDC fixed counter may be
 * claimed at most once per group (tracked in @used_counters).
 */
static bool mmdc_pmu_group_event_is_valid(struct perf_event *event,
				      struct pmu *pmu,
				      unsigned long *used_counters)
{
	int cfg = event->attr.config;

	if (is_software_event(event))
		return true;

	if (event->pmu != pmu)
		return false;

	/* Claim the counter bit; fail if it was already taken. */
	return !test_and_set_bit(cfg, used_counters);
}
0247 
0248 /*
0249  * Each event has a single fixed-purpose counter, so we can only have a
0250  * single active event for each at any point in time. Here we just check
0251  * for duplicates, and rely on mmdc_pmu_event_init to verify that the HW
0252  * event numbers are valid.
0253  */
0254 static bool mmdc_pmu_group_is_valid(struct perf_event *event)
0255 {
0256     struct pmu *pmu = event->pmu;
0257     struct perf_event *leader = event->group_leader;
0258     struct perf_event *sibling;
0259     unsigned long counter_mask = 0;
0260 
0261     set_bit(leader->attr.config, &counter_mask);
0262 
0263     if (event != leader) {
0264         if (!mmdc_pmu_group_event_is_valid(event, pmu, &counter_mask))
0265             return false;
0266     }
0267 
0268     for_each_sibling_event(sibling, leader) {
0269         if (!mmdc_pmu_group_event_is_valid(sibling, pmu, &counter_mask))
0270             return false;
0271     }
0272 
0273     return true;
0274 }
0275 
/*
 * perf event_init callback: accept only non-sampling, CPU-bound events of
 * this PMU's type whose config selects one of the six fixed counters, and
 * pin them to the CPU that owns this PMU instance.
 */
static int mmdc_pmu_event_init(struct perf_event *event)
{
	struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
	int cfg = event->attr.config;

	/* Not ours: let the core try other PMUs. */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* Uncore counters: no sampling, no per-task attachment. */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EOPNOTSUPP;

	if (event->cpu < 0) {
		dev_warn(pmu_mmdc->dev, "Can't provide per-task data!\n");
		return -EOPNOTSUPP;
	}

	if (event->attr.sample_period)
		return -EINVAL;

	/* config selects one of the six fixed-purpose counters. */
	if (cfg < 0 || cfg >= MMDC_NUM_COUNTERS)
		return -EINVAL;

	if (!mmdc_pmu_group_is_valid(event))
		return -EINVAL;

	/* All events for this instance run on its single owning CPU. */
	event->cpu = cpumask_first(&pmu_mmdc->cpu);
	return 0;
}
0304 
/*
 * Fold the current hardware count into event->count.
 * Called from the hrtimer poll, perf read, and event stop paths.
 */
static void mmdc_pmu_event_update(struct perf_event *event)
{
	struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;

	/*
	 * Publish the fresh reading as prev_count; retry if a concurrent
	 * updater changed prev_count between our read and the cmpxchg.
	 */
	do {
		prev_raw_count = local64_read(&hwc->prev_count);
		new_raw_count = mmdc_pmu_read_counter(pmu_mmdc,
						      event->attr.config);
	} while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
		new_raw_count) != prev_raw_count);

	/* Counters are 32 bits wide: take the delta modulo 2^32 so a
	 * wrap-around between polls is still accounted correctly. */
	delta = (new_raw_count - prev_raw_count) & 0xFFFFFFFF;

	local64_add(delta, &event->count);
}
0322 
/*
 * Start counting: kick the poll timer, reset the hardware counters,
 * program the AXI id filter and enable profiling.
 */
static void mmdc_pmu_event_start(struct perf_event *event, int flags)
{
	struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	void __iomem *mmdc_base, *reg;
	u32 val;

	mmdc_base = pmu_mmdc->mmdc_base;
	reg = mmdc_base + MMDC_MADPCR0;

	/*
	 * hrtimer is required because mmdc does not provide an interrupt so
	 * polling is necessary
	 */
	hrtimer_start(&pmu_mmdc->hrtimer, mmdc_pmu_timer_period(),
			HRTIMER_MODE_REL_PINNED);

	/* The counters are reset just below, so the delta baseline is 0. */
	local64_set(&hwc->prev_count, 0);

	/* Reset all profiling counters before enabling them. */
	writel(DBG_RST, reg);

	/*
	 * Write the AXI id parameter to MADPCR1.
	 */
	val = event->attr.config1;
	reg = mmdc_base + MMDC_MADPCR1;
	writel(val, reg);

	/* Enable profiling; i.MX6QP additionally needs PROFILE_SEL set. */
	reg = mmdc_base + MMDC_MADPCR0;
	val = DBG_EN;
	if (pmu_mmdc->devtype_data->flags & MMDC_FLAG_PROFILE_SEL)
		val |= PROFILE_SEL;

	writel(val, reg);
}
0358 
0359 static int mmdc_pmu_event_add(struct perf_event *event, int flags)
0360 {
0361     struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
0362     struct hw_perf_event *hwc = &event->hw;
0363 
0364     int cfg = event->attr.config;
0365 
0366     if (flags & PERF_EF_START)
0367         mmdc_pmu_event_start(event, flags);
0368 
0369     if (pmu_mmdc->mmdc_events[cfg] != NULL)
0370         return -EAGAIN;
0371 
0372     pmu_mmdc->mmdc_events[cfg] = event;
0373     pmu_mmdc->active_events++;
0374 
0375     local64_set(&hwc->prev_count, mmdc_pmu_read_counter(pmu_mmdc, cfg));
0376 
0377     return 0;
0378 }
0379 
0380 static void mmdc_pmu_event_stop(struct perf_event *event, int flags)
0381 {
0382     struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
0383     void __iomem *mmdc_base, *reg;
0384 
0385     mmdc_base = pmu_mmdc->mmdc_base;
0386     reg = mmdc_base + MMDC_MADPCR0;
0387 
0388     writel(PRF_FRZ, reg);
0389 
0390     reg = mmdc_base + MMDC_MADPCR1;
0391     writel(MMDC_PRF_AXI_ID_CLEAR, reg);
0392 
0393     mmdc_pmu_event_update(event);
0394 }
0395 
/*
 * perf del callback: release the event's counter slot, stop polling once
 * no events remain, and record the final count.
 */
static void mmdc_pmu_event_del(struct perf_event *event, int flags)
{
	struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
	int cfg = event->attr.config;

	/* Free the fixed counter slot this event occupied. */
	pmu_mmdc->mmdc_events[cfg] = NULL;
	pmu_mmdc->active_events--;

	/* Stop the poll timer when the last event goes away. */
	if (pmu_mmdc->active_events == 0)
		hrtimer_cancel(&pmu_mmdc->hrtimer);

	/* Freeze the hardware and fold in the final count. */
	mmdc_pmu_event_stop(event, PERF_EF_UPDATE);
}
0409 
0410 static void mmdc_pmu_overflow_handler(struct mmdc_pmu *pmu_mmdc)
0411 {
0412     int i;
0413 
0414     for (i = 0; i < MMDC_NUM_COUNTERS; i++) {
0415         struct perf_event *event = pmu_mmdc->mmdc_events[i];
0416 
0417         if (event)
0418             mmdc_pmu_event_update(event);
0419     }
0420 }
0421 
0422 static enum hrtimer_restart mmdc_pmu_timer_handler(struct hrtimer *hrtimer)
0423 {
0424     struct mmdc_pmu *pmu_mmdc = container_of(hrtimer, struct mmdc_pmu,
0425             hrtimer);
0426 
0427     mmdc_pmu_overflow_handler(pmu_mmdc);
0428     hrtimer_forward_now(hrtimer, mmdc_pmu_timer_period());
0429 
0430     return HRTIMER_RESTART;
0431 }
0432 
/*
 * Fill in @pmu_mmdc (perf callbacks, register base, owning device) and
 * allocate a unique instance number from mmdc_ida.
 *
 * Returns the instance number used to build the "mmdc%d" PMU name, or a
 * negative errno straight from ida_simple_get(); the caller must check
 * for failure and is responsible for eventually releasing the id.
 */
static int mmdc_pmu_init(struct mmdc_pmu *pmu_mmdc,
		void __iomem *mmdc_base, struct device *dev)
{
	int mmdc_num;

	*pmu_mmdc = (struct mmdc_pmu) {
		.pmu = (struct pmu) {
			.task_ctx_nr    = perf_invalid_context,
			.attr_groups    = attr_groups,
			.event_init     = mmdc_pmu_event_init,
			.add            = mmdc_pmu_event_add,
			.del            = mmdc_pmu_event_del,
			.start          = mmdc_pmu_event_start,
			.stop           = mmdc_pmu_event_stop,
			.read           = mmdc_pmu_event_update,
			.capabilities   = PERF_PMU_CAP_NO_EXCLUDE,
		},
		.mmdc_base = mmdc_base,
		.dev = dev,
		.active_events = 0,
	};

	mmdc_num = ida_simple_get(&mmdc_ida, 0, 0, GFP_KERNEL);

	return mmdc_num;
}
0459 
/*
 * Platform remove: tear down in reverse order of imx_mmdc_perf_init()
 * and probe (hotplug instance, PMU, mapping, clock, allocation).
 */
static int imx_mmdc_remove(struct platform_device *pdev)
{
	struct mmdc_pmu *pmu_mmdc = platform_get_drvdata(pdev);

	cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);
	perf_pmu_unregister(&pmu_mmdc->pmu);
	iounmap(pmu_mmdc->mmdc_base);
	clk_disable_unprepare(pmu_mmdc->mmdc_ipg_clk);
	/* NOTE(review): the ida id allocated in mmdc_pmu_init() is never
	 * released here, so instance numbers leak across unbind/bind. */
	kfree(pmu_mmdc);
	return 0;
}
0471 
0472 static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_base,
0473                   struct clk *mmdc_ipg_clk)
0474 {
0475     struct mmdc_pmu *pmu_mmdc;
0476     char *name;
0477     int mmdc_num;
0478     int ret;
0479     const struct of_device_id *of_id =
0480         of_match_device(imx_mmdc_dt_ids, &pdev->dev);
0481 
0482     pmu_mmdc = kzalloc(sizeof(*pmu_mmdc), GFP_KERNEL);
0483     if (!pmu_mmdc) {
0484         pr_err("failed to allocate PMU device!\n");
0485         return -ENOMEM;
0486     }
0487 
0488     /* The first instance registers the hotplug state */
0489     if (!cpuhp_mmdc_state) {
0490         ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
0491                           "perf/arm/mmdc:online", NULL,
0492                           mmdc_pmu_offline_cpu);
0493         if (ret < 0) {
0494             pr_err("cpuhp_setup_state_multi failed\n");
0495             goto pmu_free;
0496         }
0497         cpuhp_mmdc_state = ret;
0498     }
0499 
0500     mmdc_num = mmdc_pmu_init(pmu_mmdc, mmdc_base, &pdev->dev);
0501     pmu_mmdc->mmdc_ipg_clk = mmdc_ipg_clk;
0502     if (mmdc_num == 0)
0503         name = "mmdc";
0504     else
0505         name = devm_kasprintf(&pdev->dev,
0506                 GFP_KERNEL, "mmdc%d", mmdc_num);
0507 
0508     pmu_mmdc->devtype_data = (struct fsl_mmdc_devtype_data *)of_id->data;
0509 
0510     hrtimer_init(&pmu_mmdc->hrtimer, CLOCK_MONOTONIC,
0511             HRTIMER_MODE_REL);
0512     pmu_mmdc->hrtimer.function = mmdc_pmu_timer_handler;
0513 
0514     cpumask_set_cpu(raw_smp_processor_id(), &pmu_mmdc->cpu);
0515 
0516     /* Register the pmu instance for cpu hotplug */
0517     cpuhp_state_add_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);
0518 
0519     ret = perf_pmu_register(&(pmu_mmdc->pmu), name, -1);
0520     if (ret)
0521         goto pmu_register_err;
0522 
0523     platform_set_drvdata(pdev, pmu_mmdc);
0524     return 0;
0525 
0526 pmu_register_err:
0527     pr_warn("MMDC Perf PMU failed (%d), disabled\n", ret);
0528     cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);
0529     hrtimer_cancel(&pmu_mmdc->hrtimer);
0530 pmu_free:
0531     kfree(pmu_mmdc);
0532     return ret;
0533 }
0534 
0535 #else
0536 #define imx_mmdc_remove NULL
0537 #define imx_mmdc_perf_init(pdev, mmdc_base, mmdc_ipg_clk) 0
0538 #endif
0539 
0540 static int imx_mmdc_probe(struct platform_device *pdev)
0541 {
0542     struct device_node *np = pdev->dev.of_node;
0543     void __iomem *mmdc_base, *reg;
0544     struct clk *mmdc_ipg_clk;
0545     u32 val;
0546     int err;
0547 
0548     /* the ipg clock is optional */
0549     mmdc_ipg_clk = devm_clk_get(&pdev->dev, NULL);
0550     if (IS_ERR(mmdc_ipg_clk))
0551         mmdc_ipg_clk = NULL;
0552 
0553     err = clk_prepare_enable(mmdc_ipg_clk);
0554     if (err) {
0555         dev_err(&pdev->dev, "Unable to enable mmdc ipg clock.\n");
0556         return err;
0557     }
0558 
0559     mmdc_base = of_iomap(np, 0);
0560     WARN_ON(!mmdc_base);
0561 
0562     reg = mmdc_base + MMDC_MDMISC;
0563     /* Get ddr type */
0564     val = readl_relaxed(reg);
0565     ddr_type = (val & BM_MMDC_MDMISC_DDR_TYPE) >>
0566          BP_MMDC_MDMISC_DDR_TYPE;
0567 
0568     reg = mmdc_base + MMDC_MAPSR;
0569 
0570     /* Enable automatic power saving */
0571     val = readl_relaxed(reg);
0572     val &= ~(1 << BP_MMDC_MAPSR_PSD);
0573     writel_relaxed(val, reg);
0574 
0575     err = imx_mmdc_perf_init(pdev, mmdc_base, mmdc_ipg_clk);
0576     if (err) {
0577         iounmap(mmdc_base);
0578         clk_disable_unprepare(mmdc_ipg_clk);
0579     }
0580 
0581     return err;
0582 }
0583 
/*
 * Return the DDR type field read from MMDC_MDMISC at probe time
 * (0 until imx_mmdc_probe() has run). Exported via "common.h" for
 * other machine code.
 */
int imx_mmdc_get_ddr_type(void)
{
	return ddr_type;
}
0588 
static struct platform_driver imx_mmdc_driver = {
	.driver     = {
		.name   = "imx-mmdc",
		.of_match_table = imx_mmdc_dt_ids,
	},
	.probe      = imx_mmdc_probe,
	/* NULL when CONFIG_PERF_EVENTS is disabled (see #else above). */
	.remove     = imx_mmdc_remove,
};

static int __init imx_mmdc_init(void)
{
	return platform_driver_register(&imx_mmdc_driver);
}
/* Registered at postcore level so the DDR type is available early. */
postcore_initcall(imx_mmdc_init);