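/*
 * STMicroelectronics sensors trigger driver: sets up an IIO trigger backed
 * by the sensor's data-ready (DRDY) interrupt line and validates that the
 * trigger is only used with the IIO device that owns it.
 */
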
#include <linux/kernel.h>
#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>
#include <linux/interrupt.h>
#include <linux/regmap.h>
#include <linux/iio/common/st_sensors.h>
#include "st_sensors_core.h"
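
/**
 * st_sensors_new_samples_available() - check if the sensor has new samples
 * @indio_dev: IIO device reference.
 * @sdata: sensor data.
 *
 * Returns true if new samples are available, or if availability cannot be
 * checked because the sensor has no status register; false otherwise.
 */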
static bool st_sensors_new_samples_available(struct iio_dev *indio_dev,
					     struct st_sensor_data *sdata)
{
	int ret, status;

	/* Without a status register we cannot check, so assume data is ready */
	if (!sdata->sensor_settings->drdy_irq.stat_drdy.addr)
		return true;

	/* No scan mask, no interrupt */
	if (!indio_dev->active_scan_mask)
		return false;

	ret = regmap_read(sdata->regmap,
			  sdata->sensor_settings->drdy_irq.stat_drdy.addr,
			  &status);
	if (ret < 0) {
		dev_err(indio_dev->dev.parent,
			"error checking samples available\n");
		return false;
	}

	return !!(status & sdata->sensor_settings->drdy_irq.stat_drdy.mask);
}
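
/**
 * st_sensors_irq_handler() - top half of the IRQ-based trigger
 * @irq: irq number
 * @p: private handler data
 */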
static irqreturn_t st_sensors_irq_handler(int irq, void *p)
{
	struct iio_trigger *trig = p;
	struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
	struct st_sensor_data *sdata = iio_priv(indio_dev);

	/* Grab the timestamp as close to the interrupt as possible */
	sdata->hw_timestamp = iio_get_time_ns(indio_dev);
	return IRQ_WAKE_THREAD;
}
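
/**
 * st_sensors_irq_thread() - bottom half of the IRQ-based trigger
 * @irq: irq number
 * @p: private handler data
 */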
static irqreturn_t st_sensors_irq_thread(int irq, void *p)
{
	struct iio_trigger *trig = p;
	struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
	struct st_sensor_data *sdata = iio_priv(indio_dev);

	/*
	 * If this trigger is backed by a hardware interrupt and a status
	 * register is available, check that the IRQ really came from us.
	 * When the status cannot be checked,
	 * st_sensors_new_samples_available() returns true and we poll
	 * unconditionally.
	 */
	if (sdata->hw_irq_trigger &&
	    st_sensors_new_samples_available(indio_dev, sdata)) {
		iio_trigger_poll_chained(p);
	} else {
		dev_dbg(indio_dev->dev.parent, "spurious IRQ\n");
		return IRQ_NONE;
	}

	/*
	 * With level IRQs the handler is re-entered while the line stays
	 * active, so return here and come back in through the top half
	 * if need be.
	 */
	if (!sdata->edge_irq)
		return IRQ_HANDLED;

	/*
	 * With edge IRQs, samples arriving while the IRQ is being processed
	 * would be missed unless they are picked up here, so keep
	 * re-triggering as long as the status register reports new samples.
	 */
	while (sdata->hw_irq_trigger &&
	       st_sensors_new_samples_available(indio_dev, sdata)) {
		dev_dbg(indio_dev->dev.parent,
			"more samples came in during polling\n");
		sdata->hw_timestamp = iio_get_time_ns(indio_dev);
		iio_trigger_poll_chained(p);
	}

	return IRQ_HANDLED;
}
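
/**
 * st_sensors_allocate_trigger() - allocate and register a DRDY trigger
 * @indio_dev: IIO device reference.
 * @trigger_ops: operations for the new trigger.
 *
 * Returns 0 on success, a negative error code otherwise.
 */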
int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
				const struct iio_trigger_ops *trigger_ops)
{
	struct st_sensor_data *sdata = iio_priv(indio_dev);
	struct device *parent = indio_dev->dev.parent;
	unsigned long irq_trig;
	int err;

	sdata->trig = devm_iio_trigger_alloc(parent, "%s-trigger",
					     indio_dev->name);
	if (sdata->trig == NULL) {
		dev_err(&indio_dev->dev, "failed to allocate iio trigger.\n");
		return -ENOMEM;
	}

	iio_trigger_set_drvdata(sdata->trig, indio_dev);
	sdata->trig->ops = trigger_ops;

	irq_trig = irqd_get_trigger_type(irq_get_irq_data(sdata->irq));

	/*
	 * If the IRQ is triggered on the falling edge or low level, mark the
	 * interrupt as active low, provided the hardware supports it.
	 */
	switch (irq_trig) {
	case IRQF_TRIGGER_FALLING:
	case IRQF_TRIGGER_LOW:
		if (!sdata->sensor_settings->drdy_irq.addr_ihl) {
			dev_err(&indio_dev->dev,
				"falling/low specified for IRQ but hardware supports only rising/high: will request rising/high\n");
			if (irq_trig == IRQF_TRIGGER_FALLING)
				irq_trig = IRQF_TRIGGER_RISING;
			if (irq_trig == IRQF_TRIGGER_LOW)
				irq_trig = IRQF_TRIGGER_HIGH;
		} else {
			/* Configure active low / falling edge in hardware */
			err = st_sensors_write_data_with_mask(indio_dev,
				sdata->sensor_settings->drdy_irq.addr_ihl,
				sdata->sensor_settings->drdy_irq.mask_ihl, 1);
			if (err < 0)
				return err;
			dev_info(&indio_dev->dev,
				 "interrupts on the falling edge or active low level\n");
		}
		break;
	case IRQF_TRIGGER_RISING:
		dev_info(&indio_dev->dev,
			 "interrupts on the rising edge\n");
		break;
	case IRQF_TRIGGER_HIGH:
		dev_info(&indio_dev->dev,
			 "interrupts active high level\n");
		break;
	default:
		/* Fall back to rising edge, the preferred default */
		dev_err(&indio_dev->dev,
			"unsupported IRQ trigger specified (%lx), enforce rising edge\n", irq_trig);
		irq_trig = IRQF_TRIGGER_RISING;
	}

	/* Tell the interrupt handler whether we are dealing with edges */
	if (irq_trig == IRQF_TRIGGER_FALLING ||
	    irq_trig == IRQF_TRIGGER_RISING) {
		if (!sdata->sensor_settings->drdy_irq.stat_drdy.addr) {
			dev_err(&indio_dev->dev,
				"edge IRQ not supported w/o stat register.\n");
			return -EOPNOTSUPP;
		}
		sdata->edge_irq = true;
	} else {
		/*
		 * For level interrupts, mask off the IRQ and handle one
		 * interrupt at a time; if the line is still active afterwards
		 * the top half is entered again.
		 */
		irq_trig |= IRQF_ONESHOT;
	}

	/*
	 * An open drain interrupt pin may be shared with other peripherals,
	 * but sharing is only possible if a status register and mask are
	 * available to tell whether this sensor actually fired the IRQ.
	 */
	if (sdata->int_pin_open_drain &&
	    sdata->sensor_settings->drdy_irq.stat_drdy.addr)
		irq_trig |= IRQF_SHARED;

	err = devm_request_threaded_irq(parent,
					sdata->irq,
					st_sensors_irq_handler,
					st_sensors_irq_thread,
					irq_trig,
					sdata->trig->name,
					sdata->trig);
	if (err) {
		dev_err(&indio_dev->dev, "failed to request trigger IRQ.\n");
		return err;
	}

	err = devm_iio_trigger_register(parent, sdata->trig);
	if (err < 0) {
		dev_err(&indio_dev->dev, "failed to register iio trigger.\n");
		return err;
	}
	indio_dev->trig = iio_trigger_get(sdata->trig);

	return 0;
}
EXPORT_SYMBOL_NS(st_sensors_allocate_trigger, IIO_ST_SENSORS);
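
/**
 * st_sensors_validate_device() - check that a trigger belongs to this device
 * @trig: the IIO trigger being attached.
 * @indio_dev: the IIO device the trigger is attached to.
 *
 * Returns 0 if the trigger was allocated for @indio_dev, -EINVAL otherwise.
 */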
int st_sensors_validate_device(struct iio_trigger *trig,
			       struct iio_dev *indio_dev)
{
	struct iio_dev *indio = iio_trigger_get_drvdata(trig);

	if (indio != indio_dev)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_NS(st_sensors_validate_device, IIO_ST_SENSORS);