// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP HDQ / 1-wire bus master driver.
 *
 * Copyright (C) Texas Instruments, Inc.
 */
0011 #include <linux/kernel.h>
0012 #include <linux/module.h>
0013 #include <linux/platform_device.h>
0014 #include <linux/interrupt.h>
0015 #include <linux/slab.h>
0016 #include <linux/err.h>
0017 #include <linux/io.h>
0018 #include <linux/sched.h>
0019 #include <linux/pm_runtime.h>
0020 #include <linux/of.h>
0021
0022 #include <linux/w1.h>
0023
0024 #define MOD_NAME "OMAP_HDQ:"
0025
0026 #define OMAP_HDQ_REVISION 0x00
0027 #define OMAP_HDQ_TX_DATA 0x04
0028 #define OMAP_HDQ_RX_DATA 0x08
0029 #define OMAP_HDQ_CTRL_STATUS 0x0c
0030 #define OMAP_HDQ_CTRL_STATUS_SINGLE BIT(7)
0031 #define OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK BIT(6)
0032 #define OMAP_HDQ_CTRL_STATUS_CLOCKENABLE BIT(5)
0033 #define OMAP_HDQ_CTRL_STATUS_GO BIT(4)
0034 #define OMAP_HDQ_CTRL_STATUS_PRESENCE BIT(3)
0035 #define OMAP_HDQ_CTRL_STATUS_INITIALIZATION BIT(2)
0036 #define OMAP_HDQ_CTRL_STATUS_DIR BIT(1)
0037 #define OMAP_HDQ_INT_STATUS 0x10
0038 #define OMAP_HDQ_INT_STATUS_TXCOMPLETE BIT(2)
0039 #define OMAP_HDQ_INT_STATUS_RXCOMPLETE BIT(1)
0040 #define OMAP_HDQ_INT_STATUS_TIMEOUT BIT(0)
0041
0042 #define OMAP_HDQ_FLAG_CLEAR 0
0043 #define OMAP_HDQ_FLAG_SET 1
0044 #define OMAP_HDQ_TIMEOUT (HZ/5)
0045
0046 #define OMAP_HDQ_MAX_USER 4
0047
/* Single wait queue shared by all transfers; woken from the IRQ handler. */
static DECLARE_WAIT_QUEUE_HEAD(hdq_wait_queue);

/* Optional slave id reported by the fake bus search used in HDQ mode. */
static int w1_id;
module_param(w1_id, int, S_IRUSR);
MODULE_PARM_DESC(w1_id, "1-wire id for the slave detection in HDQ mode");
0053
/* Per-device driver state. */
struct hdq_data {
	struct device *dev;
	void __iomem *hdq_base;		/* mapped HDQ register window */
	/* Serializes whole bus transactions (byte TX/RX, break). */
	struct mutex hdq_mutex;
	/* Interrupt status bits latched by the ISR, consumed by waiters. */
	u8 hdq_irqstatus;
	/* Protects hdq_irqstatus against concurrent ISR/thread access. */
	spinlock_t hdq_spinlock;
	/* 0 = HDQ mode, 1 = 1-wire mode (from the "ti,mode" DT property). */
	int mode;
};
0066
0067
0068 static inline u8 hdq_reg_in(struct hdq_data *hdq_data, u32 offset)
0069 {
0070 return __raw_readl(hdq_data->hdq_base + offset);
0071 }
0072
0073 static inline void hdq_reg_out(struct hdq_data *hdq_data, u32 offset, u8 val)
0074 {
0075 __raw_writel(val, hdq_data->hdq_base + offset);
0076 }
0077
0078 static inline u8 hdq_reg_merge(struct hdq_data *hdq_data, u32 offset,
0079 u8 val, u8 mask)
0080 {
0081 u8 new_val = (__raw_readl(hdq_data->hdq_base + offset) & ~mask)
0082 | (val & mask);
0083 __raw_writel(new_val, hdq_data->hdq_base + offset);
0084
0085 return new_val;
0086 }
0087
0088
0089
0090
0091
0092
0093
0094 static int hdq_wait_for_flag(struct hdq_data *hdq_data, u32 offset,
0095 u8 flag, u8 flag_set, u8 *status)
0096 {
0097 int ret = 0;
0098 unsigned long timeout = jiffies + OMAP_HDQ_TIMEOUT;
0099
0100 if (flag_set == OMAP_HDQ_FLAG_CLEAR) {
0101
0102 while (((*status = hdq_reg_in(hdq_data, offset)) & flag)
0103 && time_before(jiffies, timeout)) {
0104 schedule_timeout_uninterruptible(1);
0105 }
0106 if (*status & flag)
0107 ret = -ETIMEDOUT;
0108 } else if (flag_set == OMAP_HDQ_FLAG_SET) {
0109
0110 while (!((*status = hdq_reg_in(hdq_data, offset)) & flag)
0111 && time_before(jiffies, timeout)) {
0112 schedule_timeout_uninterruptible(1);
0113 }
0114 if (!(*status & flag))
0115 ret = -ETIMEDOUT;
0116 } else
0117 return -EINVAL;
0118
0119 return ret;
0120 }
0121
0122
0123 static u8 hdq_reset_irqstatus(struct hdq_data *hdq_data, u8 bits)
0124 {
0125 unsigned long irqflags;
0126 u8 status;
0127
0128 spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
0129 status = hdq_data->hdq_irqstatus;
0130
0131 hdq_data->hdq_irqstatus &= ~bits;
0132 spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
0133
0134 return status;
0135 }
0136
0137
/*
 * Transmit one byte on the bus and report the resulting interrupt
 * status in *status.
 *
 * Sleeps; must be called from process context with the device powered.
 * Returns 0 on success, -EINTR if the mutex wait was interrupted,
 * -ETIMEDOUT if the TX interrupt or the GO-bit handshake timed out.
 */
static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status)
{
	int ret;
	u8 tmp_status;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		ret = -EINTR;
		goto rtn;
	}

	/* A non-zero status here means a previous transfer never drained. */
	if (hdq_data->hdq_irqstatus)
		dev_err(hdq_data->dev, "TX irqstatus not cleared (%02x)\n",
			hdq_data->hdq_irqstatus);

	*status = 0;

	hdq_reg_out(hdq_data, OMAP_HDQ_TX_DATA, val);

	/* Clear DIR (write direction) and set GO to start the transfer. */
	hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_GO,
		OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);

	/* The ISR latches TXCOMPLETE into hdq_irqstatus and wakes us. */
	ret = wait_event_timeout(hdq_wait_queue,
		(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_TXCOMPLETE),
		OMAP_HDQ_TIMEOUT);
	/* Consume the latched bit even on timeout so it does not leak. */
	*status = hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_TXCOMPLETE);
	if (ret == 0) {
		dev_dbg(hdq_data->dev, "TX wait elapsed\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Woken, but not by TXCOMPLETE (e.g. a TIMEOUT interrupt). */
	if (!(*status & OMAP_HDQ_INT_STATUS_TXCOMPLETE)) {
		dev_dbg(hdq_data->dev, "timeout waiting for"
			" TXCOMPLETE/RXCOMPLETE, %x\n", *status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Wait for the controller to drop GO, i.e. finish the cycle. */
	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
		OMAP_HDQ_CTRL_STATUS_GO,
		OMAP_HDQ_FLAG_CLEAR, &tmp_status);
	if (ret) {
		dev_dbg(hdq_data->dev, "timeout waiting GO bit"
			" return to zero, %x\n", tmp_status);
	}

out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	return ret;
}
0193
0194
0195 static irqreturn_t hdq_isr(int irq, void *_hdq)
0196 {
0197 struct hdq_data *hdq_data = _hdq;
0198 unsigned long irqflags;
0199
0200 spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
0201 hdq_data->hdq_irqstatus |= hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
0202 spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
0203 dev_dbg(hdq_data->dev, "hdq_isr: %x\n", hdq_data->hdq_irqstatus);
0204
0205 if (hdq_data->hdq_irqstatus &
0206 (OMAP_HDQ_INT_STATUS_TXCOMPLETE | OMAP_HDQ_INT_STATUS_RXCOMPLETE
0207 | OMAP_HDQ_INT_STATUS_TIMEOUT)) {
0208
0209 wake_up(&hdq_wait_queue);
0210 }
0211
0212 return IRQ_HANDLED;
0213 }
0214
0215
0216 static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev,
0217 u8 search_type, w1_slave_found_callback slave_found)
0218 {
0219 u64 module_id, rn_le, cs, id;
0220
0221 if (w1_id)
0222 module_id = w1_id;
0223 else
0224 module_id = 0x1;
0225
0226 rn_le = cpu_to_le64(module_id);
0227
0228
0229
0230
0231 cs = w1_calc_crc8((u8 *)&rn_le, 7);
0232 id = (cs << 56) | module_id;
0233
0234 slave_found(master_dev, id);
0235 }
0236
0237
/*
 * Send a break (initialization/reset) pulse on the bus and check for a
 * slave presence response.
 *
 * Returns 0 on success, -EINTR if interrupted acquiring the mutex or if
 * the wait for the hardware TIMEOUT interrupt elapsed, -ETIMEDOUT if
 * the handshake failed or no presence bit was seen.
 */
static int omap_hdq_break(struct hdq_data *hdq_data)
{
	int ret = 0;
	u8 tmp_status;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
		ret = -EINTR;
		goto rtn;
	}

	/* A stale status means a previous transfer was not drained. */
	if (hdq_data->hdq_irqstatus)
		dev_err(hdq_data->dev, "break irqstatus not cleared (%02x)\n",
			hdq_data->hdq_irqstatus);

	/* Set INIT + GO (and clear DIR) to generate the break pulse. */
	hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
		OMAP_HDQ_CTRL_STATUS_INITIALIZATION | OMAP_HDQ_CTRL_STATUS_GO,
		OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
		OMAP_HDQ_CTRL_STATUS_GO);

	/* The break sequence completes with a TIMEOUT interrupt. */
	ret = wait_event_timeout(hdq_wait_queue,
		(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_TIMEOUT),
		OMAP_HDQ_TIMEOUT);
	/* Consume the latched bit even if the wait elapsed. */
	tmp_status = hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_TIMEOUT);
	if (ret == 0) {
		dev_dbg(hdq_data->dev, "break wait elapsed\n");
		/* NOTE(review): -EINTR for an elapsed wait looks odd;
		 * -ETIMEDOUT would be more conventional — verify callers. */
		ret = -EINTR;
		goto out;
	}

	/* Woken, but the TIMEOUT bit never latched. */
	if (!(tmp_status & OMAP_HDQ_INT_STATUS_TIMEOUT)) {
		dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x\n",
			tmp_status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/*
	 * Check the presence bit: a slave must have answered the break
	 * pulse for the bus to be usable.
	 */
	if (!(hdq_reg_in(hdq_data, OMAP_HDQ_CTRL_STATUS) &
		OMAP_HDQ_CTRL_STATUS_PRESENCE)) {
		dev_dbg(hdq_data->dev, "Presence bit not set\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	/*
	 * Wait for both INIT and GO to return to zero, i.e. for the
	 * controller to finish the initialization sequence.
	 */
	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
		OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
		OMAP_HDQ_CTRL_STATUS_GO, OMAP_HDQ_FLAG_CLEAR,
		&tmp_status);
	if (ret)
		dev_dbg(hdq_data->dev, "timeout waiting INIT&GO bits"
			" return to zero, %x\n", tmp_status);

out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	return ret;
}
0307
/*
 * Receive one byte from the bus into *val.
 *
 * If an RXCOMPLETE is already latched, the data register is read
 * directly; otherwise a read cycle is started and waited for.  Returns
 * 0 on success, -EINTR on mutex interruption, -EINVAL if the device is
 * runtime-suspended, -ETIMEDOUT if no RXCOMPLETE arrived.
 */
static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val)
{
	int ret = 0;
	u8 status;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		ret = -EINTR;
		goto rtn;
	}

	/* Register access requires the device to be powered. */
	if (pm_runtime_suspended(hdq_data->dev)) {
		ret = -EINVAL;
		goto out;
	}

	if (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
		/* Set DIR (read) and GO to start a receive cycle. */
		hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
			OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO,
			OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);

		/*
		 * Wait for RXCOMPLETE; a hardware TIMEOUT interrupt also
		 * ends the wait.
		 */
		wait_event_timeout(hdq_wait_queue,
			(hdq_data->hdq_irqstatus
			& (OMAP_HDQ_INT_STATUS_RXCOMPLETE |
			OMAP_HDQ_INT_STATUS_TIMEOUT)),
			OMAP_HDQ_TIMEOUT);
		status = hdq_reset_irqstatus(hdq_data,
			OMAP_HDQ_INT_STATUS_RXCOMPLETE |
			OMAP_HDQ_INT_STATUS_TIMEOUT);
		/* Restore write direction for subsequent transfers. */
		hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, 0,
			OMAP_HDQ_CTRL_STATUS_DIR);

		/* Timed out or woken without data. */
		if (!(status & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
			dev_dbg(hdq_data->dev, "timeout waiting for"
				" RXCOMPLETE, %x", status);
			ret = -ETIMEDOUT;
			goto out;
		}
	} else {
		/* Data already latched; just consume the status bit. */
		hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_RXCOMPLETE);
	}

	*val = hdq_reg_in(hdq_data, OMAP_HDQ_RX_DATA);
out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	return ret;

}
0360
0361
0362
0363
0364
/*
 * W1 triplet callback (1-wire mode): read the id bit and its complement
 * as single-bit cycles, then write the chosen direction bit, as used by
 * the w1 ROM search algorithm.
 *
 * Returns the usual triplet result encoding (read bits plus the bit
 * written in bit 2), or 0x3 when both reads are 1 / on wait timeouts.
 */
static u8 omap_w1_triplet(void *_hdq, u8 bdir)
{
	u8 id_bit, comp_bit;
	int err;
	u8 ret = 0x3; /* "no device" default */
	struct hdq_data *hdq_data = _hdq;
	/* Single-bit transfer with the controller interrupt unmasked. */
	u8 ctrl = OMAP_HDQ_CTRL_STATUS_SINGLE | OMAP_HDQ_CTRL_STATUS_GO |
		OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK;
	u8 mask = ctrl | OMAP_HDQ_CTRL_STATUS_DIR;

	err = pm_runtime_get_sync(hdq_data->dev);
	if (err < 0) {
		pm_runtime_put_noidle(hdq_data->dev);

		/* NOTE(review): a negative err is truncated into the u8
		 * return value here — callers cannot tell it apart from
		 * a triplet code.  Verify against the w1 core contract. */
		return err;
	}

	err = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (err < 0) {
		dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
		goto rtn;
	}

	/* Read the id bit: single-bit read (DIR set). */
	hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS,
		ctrl | OMAP_HDQ_CTRL_STATUS_DIR, mask);
	err = wait_event_timeout(hdq_wait_queue,
		(hdq_data->hdq_irqstatus
		& OMAP_HDQ_INT_STATUS_RXCOMPLETE),
		OMAP_HDQ_TIMEOUT);
	/* Always consume the latched RX status, even on timeout. */
	hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_RXCOMPLETE);

	if (err == 0) {
		dev_dbg(hdq_data->dev, "RX wait elapsed\n");
		goto out;
	}
	id_bit = (hdq_reg_in(_hdq, OMAP_HDQ_RX_DATA) & 0x01);

	/* Read the complement bit the same way. */
	hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS,
		ctrl | OMAP_HDQ_CTRL_STATUS_DIR, mask);
	err = wait_event_timeout(hdq_wait_queue,
		(hdq_data->hdq_irqstatus
		& OMAP_HDQ_INT_STATUS_RXCOMPLETE),
		OMAP_HDQ_TIMEOUT);
	hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_RXCOMPLETE);

	if (err == 0) {
		dev_dbg(hdq_data->dev, "RX wait elapsed\n");
		goto out;
	}
	comp_bit = (hdq_reg_in(_hdq, OMAP_HDQ_RX_DATA) & 0x01);

	if (id_bit && comp_bit) {
		ret = 0x03; /* no devices responded on this bit */
		goto out;
	}
	if (!id_bit && !comp_bit) {
		/* Devices on both branches: follow the caller's bdir. */
		ret = bdir ? 0x04 : 0;
	} else {
		/* Only one branch present: follow the id bit. */
		bdir = id_bit;
		ret = id_bit ? 0x05 : 0x02;
	}

	/* Write the chosen direction bit (single-bit write, DIR clear). */
	hdq_reg_out(_hdq, OMAP_HDQ_TX_DATA, bdir);
	hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS, ctrl, mask);
	err = wait_event_timeout(hdq_wait_queue,
		(hdq_data->hdq_irqstatus
		& OMAP_HDQ_INT_STATUS_TXCOMPLETE),
		OMAP_HDQ_TIMEOUT);
	hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_TXCOMPLETE);

	if (err == 0) {
		dev_dbg(hdq_data->dev, "TX wait elapsed\n");
		goto out;
	}

	/* Leave single-bit mode. */
	hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS, 0,
		OMAP_HDQ_CTRL_STATUS_SINGLE);

out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	pm_runtime_mark_last_busy(hdq_data->dev);
	pm_runtime_put_autosuspend(hdq_data->dev);

	return ret;
}
0459
0460
0461 static u8 omap_w1_reset_bus(void *_hdq)
0462 {
0463 struct hdq_data *hdq_data = _hdq;
0464 int err;
0465
0466 err = pm_runtime_get_sync(hdq_data->dev);
0467 if (err < 0) {
0468 pm_runtime_put_noidle(hdq_data->dev);
0469
0470 return err;
0471 }
0472
0473 omap_hdq_break(hdq_data);
0474
0475 pm_runtime_mark_last_busy(hdq_data->dev);
0476 pm_runtime_put_autosuspend(hdq_data->dev);
0477
0478 return 0;
0479 }
0480
0481
0482 static u8 omap_w1_read_byte(void *_hdq)
0483 {
0484 struct hdq_data *hdq_data = _hdq;
0485 u8 val = 0;
0486 int ret;
0487
0488 ret = pm_runtime_get_sync(hdq_data->dev);
0489 if (ret < 0) {
0490 pm_runtime_put_noidle(hdq_data->dev);
0491
0492 return -1;
0493 }
0494
0495 ret = hdq_read_byte(hdq_data, &val);
0496 if (ret)
0497 val = -1;
0498
0499 pm_runtime_mark_last_busy(hdq_data->dev);
0500 pm_runtime_put_autosuspend(hdq_data->dev);
0501
0502 return val;
0503 }
0504
0505
0506 static void omap_w1_write_byte(void *_hdq, u8 byte)
0507 {
0508 struct hdq_data *hdq_data = _hdq;
0509 int ret;
0510 u8 status;
0511
0512 ret = pm_runtime_get_sync(hdq_data->dev);
0513 if (ret < 0) {
0514 pm_runtime_put_noidle(hdq_data->dev);
0515
0516 return;
0517 }
0518
0519
0520
0521
0522
0523
0524 if (byte == W1_SKIP_ROM)
0525 omap_hdq_break(hdq_data);
0526
0527 ret = hdq_write_byte(hdq_data, byte, &status);
0528 if (ret < 0) {
0529 dev_dbg(hdq_data->dev, "TX failure:Ctrl status %x\n", status);
0530 goto out_err;
0531 }
0532
0533 out_err:
0534 pm_runtime_mark_last_busy(hdq_data->dev);
0535 pm_runtime_put_autosuspend(hdq_data->dev);
0536 }
0537
/*
 * Bus-master ops handed to the w1 core.  .search (HDQ mode) or .triplet
 * (1-wire mode) is filled in at probe time from the "ti,mode" property.
 */
static struct w1_bus_master omap_w1_master = {
	.read_byte = omap_w1_read_byte,
	.write_byte = omap_w1_write_byte,
	.reset_bus = omap_w1_reset_bus,
};
0543
/* Runtime PM suspend callback. */
static int __maybe_unused omap_hdq_runtime_suspend(struct device *dev)
{
	struct hdq_data *hdq_data = dev_get_drvdata(dev);

	/* NOTE(review): writes mode to register offset 0 — presumably
	 * drops CLOCKENABLE/INTERRUPTMASK to quiesce the block; confirm
	 * the intended register against the TRM. */
	hdq_reg_out(hdq_data, 0, hdq_data->mode);
	/* Read back interrupt status (read-to-clear — TODO confirm). */
	hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);

	return 0;
}
0553
/* Runtime PM resume: select mode, enable clocks and interrupt mask. */
static int __maybe_unused omap_hdq_runtime_resume(struct device *dev)
{
	struct hdq_data *hdq_data = dev_get_drvdata(dev);

	hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
		OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
		OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK |
		hdq_data->mode);
	/* Read back interrupt status (read-to-clear — TODO confirm). */
	hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);

	return 0;
}
0567
/* Runtime PM ops only; no system-sleep callbacks are provided. */
static const struct dev_pm_ops omap_hdq_pm_ops = {
	SET_RUNTIME_PM_OPS(omap_hdq_runtime_suspend,
			   omap_hdq_runtime_resume, NULL)
};
0572
/*
 * Probe: map registers, pick HDQ vs 1-wire mode from "ti,mode", set up
 * runtime PM and the interrupt, issue an initial break, and register
 * the w1 bus master.
 */
static int omap_hdq_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct hdq_data *hdq_data;
	int ret, irq;
	u8 rev;
	const char *mode;

	hdq_data = devm_kzalloc(dev, sizeof(*hdq_data), GFP_KERNEL);
	if (!hdq_data) {
		dev_dbg(&pdev->dev, "unable to allocate memory\n");
		return -ENOMEM;
	}

	hdq_data->dev = dev;
	platform_set_drvdata(pdev, hdq_data);

	hdq_data->hdq_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(hdq_data->hdq_base))
		return PTR_ERR(hdq_data->hdq_base);

	mutex_init(&hdq_data->hdq_mutex);

	/* Missing property or "hdq" selects HDQ mode (the default). */
	ret = of_property_read_string(pdev->dev.of_node, "ti,mode", &mode);
	if (ret < 0 || !strcmp(mode, "hdq")) {
		hdq_data->mode = 0;
		omap_w1_master.search = omap_w1_search_bus;
	} else {
		hdq_data->mode = 1;
		omap_w1_master.triplet = omap_w1_triplet;
	}

	pm_runtime_enable(&pdev->dev);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, 300);
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(&pdev->dev);
		dev_dbg(&pdev->dev, "pm_runtime_get_sync failed\n");
		goto err_w1;
	}

	rev = hdq_reg_in(hdq_data, OMAP_HDQ_REVISION);
	dev_info(&pdev->dev, "OMAP HDQ Hardware Rev %c.%c. Driver in %s mode\n",
		(rev >> 4) + '0', (rev & 0x0f) + '0', "Interrupt");

	spin_lock_init(&hdq_data->hdq_spinlock);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_dbg(&pdev->dev, "Failed to get IRQ: %d\n", irq);
		ret = irq;
		goto err_irq;
	}

	ret = devm_request_irq(dev, irq, hdq_isr, 0, "omap_hdq", hdq_data);
	if (ret < 0) {
		dev_dbg(&pdev->dev, "could not request irq\n");
		goto err_irq;
	}

	/* Initial bus reset; failure is intentionally ignored here. */
	omap_hdq_break(hdq_data);

	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	omap_w1_master.data = hdq_data;

	ret = w1_add_master_device(&omap_w1_master);
	if (ret) {
		dev_dbg(&pdev->dev, "Failure in registering w1 master\n");
		goto err_w1;
	}

	return 0;

err_irq:
	/* Drop the usage count taken above, then fall through. */
	pm_runtime_put_sync(&pdev->dev);
err_w1:
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return ret;
}
0657
0658 static int omap_hdq_remove(struct platform_device *pdev)
0659 {
0660 int active;
0661
0662 active = pm_runtime_get_sync(&pdev->dev);
0663 if (active < 0)
0664 pm_runtime_put_noidle(&pdev->dev);
0665
0666 w1_remove_master_device(&omap_w1_master);
0667
0668 pm_runtime_dont_use_autosuspend(&pdev->dev);
0669 if (active >= 0)
0670 pm_runtime_put_sync(&pdev->dev);
0671 pm_runtime_disable(&pdev->dev);
0672
0673 return 0;
0674 }
0675
/* Devicetree match table. */
static const struct of_device_id omap_hdq_dt_ids[] = {
	{ .compatible = "ti,omap3-1w" },
	{ .compatible = "ti,am4372-hdq" },
	{}
};
MODULE_DEVICE_TABLE(of, omap_hdq_dt_ids);
0682
/* Platform driver registration. */
static struct platform_driver omap_hdq_driver = {
	.probe = omap_hdq_probe,
	.remove = omap_hdq_remove,
	.driver = {
		.name = "omap_hdq",
		.of_match_table = omap_hdq_dt_ids,
		.pm = &omap_hdq_pm_ops,
	},
};
module_platform_driver(omap_hdq_driver);
0693
0694 MODULE_AUTHOR("Texas Instruments");
0695 MODULE_DESCRIPTION("HDQ-1W driver Library");
0696 MODULE_LICENSE("GPL");