#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
+#include <linux/iio/buffer.h>
#include <linux/iio/iio.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/math64.h>
#define AD4170_FILTER_FS_REG(x) (0xC7 + 14 * (x))
#define AD4170_OFFSET_REG(x) (0xCA + 14 * (x))
#define AD4170_GAIN_REG(x) (0xCD + 14 * (x))
+#define AD4170_ADC_CTRL_CONT_READ_EXIT_REG 0x200 /* virtual reg */
#define AD4170_REG_READ_MASK BIT(14)
/* AD4170_ADC_CTRL_REG */
#define AD4170_ADC_CTRL_MULTI_DATA_REG_SEL_MSK BIT(7)
+#define AD4170_ADC_CTRL_CONT_READ_MSK GENMASK(5, 4)
#define AD4170_ADC_CTRL_MODE_MSK GENMASK(3, 0)
/* AD4170_CHAN_EN_REG */
#define AD4170_PIN_MUXING_DIG_AUX1_RDY 0x1
/* AD4170_ADC_CTRL_REG constants */
+#define AD4170_ADC_CTRL_MODE_CONT 0x0
#define AD4170_ADC_CTRL_MODE_SINGLE 0x4
#define AD4170_ADC_CTRL_MODE_IDLE 0x7
+#define AD4170_ADC_CTRL_CONT_READ_DISABLE 0x0
+#define AD4170_ADC_CTRL_CONT_READ_ENABLE 0x1
+
/* AD4170_FILTER_REG constants */
#define AD4170_FILTER_FILTER_TYPE_SINC5_AVG 0x0
#define AD4170_FILTER_FILTER_TYPE_SINC5 0x4
#define AD4170_GAIN_REG_DEFAULT 0x555555
+#define AD4170_ADC_CTRL_CONT_READ_EXIT 0xA5
+
static const unsigned int ad4170_reg_size[] = {
[AD4170_CONFIG_A_REG] = 1,
[AD4170_DATA_24B_REG] = 3,
[AD4170_OFFSET_REG(5) ... AD4170_GAIN_REG(5)] = 3,
[AD4170_OFFSET_REG(6) ... AD4170_GAIN_REG(6)] = 3,
[AD4170_OFFSET_REG(7) ... AD4170_GAIN_REG(7)] = 3,
+ [AD4170_ADC_CTRL_CONT_READ_EXIT_REG] = 0,
};
enum ad4170_ref_buf {
struct spi_device *spi;
struct regmap *regmap;
int sps_tbl[ARRAY_SIZE(ad4170_filt_names)][AD4170_MAX_FS_TBL_SIZE][2];
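+	/* Sample data is staged here before being pushed to the IIO buffer */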
+ __be32 bounce_buffer[AD4170_MAX_ADC_CHANNELS];
+ struct spi_message msg;
+ struct spi_transfer xfer;
+ struct iio_trigger *trig;
unsigned int pins_fn[AD4170_NUM_ANALOG_PINS];
/*
* DMA (thus cache coherency maintenance) requires the transfer buffers
case 1:
tx_buf[AD4170_SPI_INST_PHASE_LEN] = val;
break;
+ case 0:
+ /* Write continuous read exit code */
+ tx_buf[0] = AD4170_ADC_CTRL_CONT_READ_EXIT;
+ return spi_write_then_read(st->spi, tx_buf, 1, NULL, 0);
default:
return -EINVAL;
}
.scan_type = {
.realbits = 24,
.storagebits = 32,
+ .shift = 8,
.endianness = IIO_BE,
},
};
}
}
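+/*
+ * Enable the channels selected in the scan mask so the ADC sequencer only
+ * converts those channels during buffered capture. All channels are disabled
+ * again when the buffer is torn down (see ad4170_buffer_predisable()).
+ */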
+static int ad4170_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *active_scan_mask)
+{
+ struct ad4170_state *st = iio_priv(indio_dev);
+ unsigned int chan_index;
+ int ret;
+
+ iio_for_each_active_channel(indio_dev, chan_index) {
+ ret = ad4170_set_channel_enable(st, chan_index, true);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static const struct iio_info ad4170_info = {
.read_raw = ad4170_read_raw,
.read_avail = ad4170_read_avail,
.write_raw = ad4170_write_raw,
.write_raw_get_fmt = ad4170_write_raw_get_fmt,
+ .update_scan_mode = ad4170_update_scan_mode,
.debugfs_reg_access = ad4170_debugfs_reg_access,
};
AD4170_ADC_CTRL_MULTI_DATA_REG_SEL_MSK);
}
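+/*
+ * Set up and optimize the SPI message used by the trigger handler to read a
+ * single sample from the data register.
+ */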
+static int ad4170_prepare_spi_message(struct ad4170_state *st)
+{
+ /*
+ * Continuous data register read is enabled in the buffer postenable
+ * callback, so no instruction phase is needed, i.e. we don't need to
+ * send the register address to read data. The transfer only needs the
+ * read buffer.
+ */
+ st->xfer.rx_buf = &st->rx_buf;
+ st->xfer.len = BITS_TO_BYTES(ad4170_channel_template.scan_type.realbits);
+
+ spi_message_init_with_transfers(&st->msg, &st->xfer, 1);
+
+ return devm_spi_optimize_message(&st->spi->dev, st->spi, &st->msg);
+}
+
+static int ad4170_buffer_postenable(struct iio_dev *indio_dev)
+{
+ struct ad4170_state *st = iio_priv(indio_dev);
+ int ret;
+
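+ /* Put the ADC into continuous conversion mode */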
+ ret = regmap_update_bits(st->regmap, AD4170_ADC_CTRL_REG,
+ AD4170_ADC_CTRL_MODE_MSK,
+ FIELD_PREP(AD4170_ADC_CTRL_MODE_MSK,
+ AD4170_ADC_CTRL_MODE_CONT));
+ if (ret)
+ return ret;
+
+ /*
+ * This enables continuous read of the ADC data register. The ADC must
+ * be in continuous conversion mode.
+ */
+ return regmap_update_bits(st->regmap, AD4170_ADC_CTRL_REG,
+ AD4170_ADC_CTRL_CONT_READ_MSK,
+ FIELD_PREP(AD4170_ADC_CTRL_CONT_READ_MSK,
+ AD4170_ADC_CTRL_CONT_READ_ENABLE));
+}
+
+static int ad4170_buffer_predisable(struct iio_dev *indio_dev)
+{
+ struct ad4170_state *st = iio_priv(indio_dev);
+ unsigned int i;
+ int ret;
+
+ /*
+ * Use a high register address (virtual register) to request a write of
+ * 0xA5 to the ADC during the first 8 SCLKs of the ADC data read cycle,
+ * thus exiting continuous read.
+ */
+ ret = regmap_write(st->regmap, AD4170_ADC_CTRL_CONT_READ_EXIT_REG, 0);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(st->regmap, AD4170_ADC_CTRL_REG,
+ AD4170_ADC_CTRL_CONT_READ_MSK,
+ FIELD_PREP(AD4170_ADC_CTRL_CONT_READ_MSK,
+ AD4170_ADC_CTRL_CONT_READ_DISABLE));
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(st->regmap, AD4170_ADC_CTRL_REG,
+ AD4170_ADC_CTRL_MODE_MSK,
+ FIELD_PREP(AD4170_ADC_CTRL_MODE_MSK,
+ AD4170_ADC_CTRL_MODE_IDLE));
+ if (ret)
+ return ret;
+
+ /*
+ * The ADC sequences through all the enabled channels (see datasheet
+ * page 95). That can lead to the wrong channel being read if a
+ * single-shot read (or a buffered read with a different
+ * active_scan_mask) is done after the buffer is disabled. Disable all
+ * channels so that only the requested channels will be read.
+ */
+ for (i = 0; i < indio_dev->num_channels; i++) {
+ ret = ad4170_set_channel_enable(st, i, false);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static bool ad4170_validate_scan_mask(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask)
+{
+ unsigned int masklength = iio_get_masklength(indio_dev);
+ unsigned int enabled;
+
+ /*
+ * The channel sequencer cycles through the enabled channels in
+ * sequential order, from channel 0 to channel 15, bypassing disabled
+ * channels. When more than one channel is enabled, channel 0 must
+ * always be enabled. See the channel_en register description on page 95
+ * of the datasheet.
+ */
+ enabled = bitmap_weight(scan_mask, masklength);
+ if (enabled > 1)
+ return test_bit(0, scan_mask);
+
+ return enabled == 1;
+}
+
+static const struct iio_buffer_setup_ops ad4170_buffer_ops = {
+ .postenable = ad4170_buffer_postenable,
+ .predisable = ad4170_buffer_predisable,
+ .validate_scan_mask = ad4170_validate_scan_mask,
+};
+
+static irqreturn_t ad4170_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct ad4170_state *st = iio_priv(indio_dev);
+ unsigned int chan_index;
+ unsigned int i = 0;
+ int ret;
+
+ iio_for_each_active_channel(indio_dev, chan_index) {
+ ret = spi_sync(st->spi, &st->msg);
+ if (ret)
+ goto err_out;
+
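+ /*
+ * Samples are received MSB first and copied to the start of a
+ * __be32, so the 24-bit result ends up in the upper bytes, in line
+ * with the scan_type shift of 8.
+ */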
+ memcpy(&st->bounce_buffer[i++], st->rx_buf, ARRAY_SIZE(st->rx_buf));
+ }
+
+ iio_push_to_buffers(indio_dev, st->bounce_buffer);
+err_out:
+ iio_trigger_notify_done(indio_dev->trig);
+ return IRQ_HANDLED;
+}
+
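+/*
+ * The data ready trigger is backed by this device's own interrupt, so it can
+ * only be used with this device.
+ */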
+static const struct iio_trigger_ops ad4170_trigger_ops = {
+ .validate_device = iio_trigger_validate_own_device,
+};
+
static irqreturn_t ad4170_irq_handler(int irq, void *dev_id)
{
struct iio_dev *indio_dev = dev_id;
struct ad4170_state *st = iio_priv(indio_dev);
- complete(&st->completion);
+ if (iio_buffer_enabled(indio_dev))
+ iio_trigger_poll(st->trig);
+ else
+ complete(&st->completion);
return IRQ_HANDLED;
}
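+/*
+ * Allocate and register the IIO trigger fired from the data ready interrupt
+ * handler and set it as the device's default trigger.
+ */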
+static int ad4170_trigger_setup(struct iio_dev *indio_dev)
+{
+ struct ad4170_state *st = iio_priv(indio_dev);
+ struct device *dev = &st->spi->dev;
+ int ret;
+
+ st->trig = devm_iio_trigger_alloc(dev, "%s-trig%d",
+ indio_dev->name,
+ iio_device_id(indio_dev));
+ if (!st->trig)
+ return -ENOMEM;
+
+ st->trig->ops = &ad4170_trigger_ops;
+
+ iio_trigger_set_drvdata(st->trig, indio_dev);
+ ret = devm_iio_trigger_register(dev, st->trig);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register trigger\n");
+
+ indio_dev->trig = iio_trigger_get(st->trig);
+
+ return 0;
+}
+
static int ad4170_regulator_setup(struct ad4170_state *st)
{
struct device *dev = &st->spi->dev;
IRQF_ONESHOT, indio_dev->name, indio_dev);
if (ret)
return ret;
+
+ ret = ad4170_trigger_setup(indio_dev);
+ if (ret)
+ return ret;
}
+ ret = ad4170_prepare_spi_message(st);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to prepare SPI message\n");
+
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL,
+ &ad4170_trigger_handler,
+ &ad4170_buffer_ops);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to setup read buffer\n");
+
return devm_iio_device_register(dev, indio_dev);
}