struct iio_dev *indio_dev = data;
/* Handle data ready interrupt from C4/EVENT/DREADY pin */
- if (!iio_device_claim_buffer_mode(indio_dev)) {
+ if (iio_device_try_claim_buffer_mode(indio_dev)) {
ade9000_iio_push_buffer(indio_dev);
iio_device_release_buffer_mode(indio_dev);
}
/*
* Ignore samples if the buffer is not set: it is needed if the ODR is
* set but the buffer is not enabled yet.
- *
- * Note: iio_device_claim_buffer_mode() returns -EBUSY if the buffer
- * is not enabled.
*/
- if (iio_device_claim_buffer_mode(indio_dev) < 0)
+ if (!iio_device_try_claim_buffer_mode(indio_dev))
return 0;
out = (s16 *)st->samples;
* Temperature reading can only be acquired while engine
* is running
*/
- if (iio_device_claim_buffer_mode(indio_dev)) {
- /*
- * Replacing -EBUSY or other error code
- * returned by iio_device_claim_buffer_mode()
- * because user space may rely on the current
- * one.
- */
+ if (!iio_device_try_claim_buffer_mode(indio_dev)) {
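+ /* Keep returning -EAGAIN here as user space may rely on it. */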
ret = -EAGAIN;
} else {
ret = max30100_get_temp(data, val);
* shutdown; leave shutdown briefly when buffer not running
*/
any_mode_retry:
- if (iio_device_claim_buffer_mode(indio_dev)) {
+ if (!iio_device_try_claim_buffer_mode(indio_dev)) {
/*
* This one is a *bit* hacky. If we cannot claim buffer
* mode, then try direct mode so that we make sure
*
* There are very few cases where a driver actually needs to lock the current
* mode unconditionally. It's recommended to use iio_device_claim_direct() or
- * iio_device_claim_buffer_mode() pairs or related helpers instead.
+ * iio_device_try_claim_buffer_mode() pairs or related helpers instead.
*/
void __iio_dev_mode_lock(struct iio_dev *indio_dev)
{
}
EXPORT_SYMBOL_GPL(__iio_dev_mode_unlock);
-/**
- * iio_device_claim_buffer_mode - Keep device in buffer mode
- * @indio_dev: the iio_dev associated with the device
- *
- * If the device is in buffer mode it is guaranteed to stay
- * that way until iio_device_release_buffer_mode() is called.
- *
- * Use with iio_device_release_buffer_mode().
- *
- * Returns: 0 on success, -EBUSY on failure.
- */
-int iio_device_claim_buffer_mode(struct iio_dev *indio_dev)
-{
- struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
-
- mutex_lock(&iio_dev_opaque->mlock);
-
- if (iio_buffer_enabled(indio_dev))
- return 0;
-
- mutex_unlock(&iio_dev_opaque->mlock);
- return -EBUSY;
-}
-EXPORT_SYMBOL_GPL(iio_device_claim_buffer_mode);
-
-/**
- * iio_device_release_buffer_mode - releases claim on buffer mode
- * @indio_dev: the iio_dev associated with the device
- *
- * Release the claim. Device is no longer guaranteed to stay
- * in buffer mode.
- *
- * Use with iio_device_claim_buffer_mode().
- */
-void iio_device_release_buffer_mode(struct iio_dev *indio_dev)
-{
- mutex_unlock(&to_iio_dev_opaque(indio_dev)->mlock);
-}
-EXPORT_SYMBOL_GPL(iio_device_release_buffer_mode);
-
/**
* iio_device_get_current_mode() - helper function providing read-only access to
* the opaque @currentmode variable
struct opt4060_chip *chip = iio_priv(indio_dev);
int ret = 0;
any_mode_retry:
- if (iio_device_claim_buffer_mode(indio_dev)) {
+ if (!iio_device_try_claim_buffer_mode(indio_dev)) {
/*
* This one is a *bit* hacky. If we cannot claim buffer mode,
* then try direct mode so that we make sure things cannot
*/
#define iio_device_release_direct(indio_dev) __iio_dev_mode_unlock(indio_dev)
-int iio_device_claim_buffer_mode(struct iio_dev *indio_dev);
-void iio_device_release_buffer_mode(struct iio_dev *indio_dev);
+/**
+ * iio_device_try_claim_buffer_mode() - Try to keep device in buffer mode
+ * @indio_dev: the iio_dev associated with the device
+ *
+ * If the device is in buffer mode, it is guaranteed to stay
+ * that way until iio_device_release_buffer_mode() is called.
+ *
+ * Use with iio_device_release_buffer_mode().
+ *
+ * Returns: true on success, false if the buffer is not enabled. On failure
+ * the mode is not claimed and iio_device_release_buffer_mode() must not be
+ * called.
+ */
+static inline bool iio_device_try_claim_buffer_mode(struct iio_dev *indio_dev)
+{
+ __iio_dev_mode_lock(indio_dev);
+
+ if (!iio_buffer_enabled(indio_dev)) {
+ __iio_dev_mode_unlock(indio_dev);
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * iio_device_release_buffer_mode() - releases claim on buffer mode
+ * @indio_dev: the iio_dev associated with the device
+ *
+ * Release the claim. Device is no longer guaranteed to stay
+ * in buffer mode.
+ *
+ * Use with iio_device_try_claim_buffer_mode().
+ */
+#define iio_device_release_buffer_mode(indio_dev) __iio_dev_mode_unlock(indio_dev)
extern const struct bus_type iio_bus_type;
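
For reference, a minimal sketch of the claim/release pattern the driver hunks
above are converted to; only the two iio_device_*_buffer_mode() helpers come
from this change, while indio_dev, val, ret and my_driver_read_sample() are
illustrative placeholders for driver-specific code:

	if (!iio_device_try_claim_buffer_mode(indio_dev)) {
		/* Buffer not enabled, reading cannot be made right now. */
		return -EAGAIN;
	}

	ret = my_driver_read_sample(indio_dev, val);	/* driver-specific */
	iio_device_release_buffer_mode(indio_dev);

	return ret;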