				mhi_cntrl->runtime_put(mhi_cntrl);
			}
-			/*
-			 * Recycle the buffer if buffer is pre-allocated,
-			 * if there is an error, not much we can do apart
-			 * from dropping the packet
-			 */
-			if (mhi_chan->pre_alloc) {
-				if (mhi_queue_buf(mhi_chan->mhi_dev,
-						  mhi_chan->dir,
-						  buf_info->cb_buf,
-						  buf_info->len, MHI_EOT)) {
-					dev_err(dev,
-						"Error recycling buffer for chan:%d\n",
-						mhi_chan->chan);
-					kfree(buf_info->cb_buf);
-				}
-			}
-
			read_lock_bh(&mhi_chan->lock);
		}
		break;
int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
		  struct sk_buff *skb, size_t len, enum mhi_flags mflags)
{
-	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
-							     mhi_dev->dl_chan;
	struct mhi_buf_info buf_info = { };
	buf_info.v_addr = skb->data;
	buf_info.cb_buf = skb;
	buf_info.len = len;
-	if (unlikely(mhi_chan->pre_alloc))
-		return -EINVAL;
-
	return mhi_queue(mhi_dev, &buf_info, dir, mflags);
}
EXPORT_SYMBOL_GPL(mhi_queue_skb);
	if (ret)
		goto error_pm_state;
-	if (mhi_chan->dir == DMA_FROM_DEVICE)
-		mhi_chan->pre_alloc = !!(flags & MHI_CH_INBOUND_ALLOC_BUFS);
-
-	/* Pre-allocate buffer for xfer ring */
-	if (mhi_chan->pre_alloc) {
-		int nr_el = get_nr_avail_ring_elements(mhi_cntrl,
-						       &mhi_chan->tre_ring);
-		size_t len = mhi_cntrl->buffer_len;
-
-		while (nr_el--) {
-			void *buf;
-			struct mhi_buf_info info = { };
-
-			buf = kmalloc(len, GFP_KERNEL);
-			if (!buf) {
-				ret = -ENOMEM;
-				goto error_pre_alloc;
-			}
-
-			/* Prepare transfer descriptors */
-			info.v_addr = buf;
-			info.cb_buf = buf;
-			info.len = len;
-			ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &info, MHI_EOT);
-			if (ret) {
-				kfree(buf);
-				goto error_pre_alloc;
-			}
-		}
-
-		read_lock_bh(&mhi_cntrl->pm_lock);
-		if (MHI_DB_ACCESS_VALID(mhi_cntrl)) {
-			read_lock_irq(&mhi_chan->lock);
-			mhi_ring_chan_db(mhi_cntrl, mhi_chan);
-			read_unlock_irq(&mhi_chan->lock);
-		}
-		read_unlock_bh(&mhi_cntrl->pm_lock);
-	}
-
	mutex_unlock(&mhi_chan->mutex);
	return 0;
error_init_chan:
	mutex_unlock(&mhi_chan->mutex);
-	return ret;
-
-error_pre_alloc:
-	mutex_unlock(&mhi_chan->mutex);
-	mhi_unprepare_channel(mhi_cntrl, mhi_chan);
-
	return ret;
}
		mhi_del_ring_element(mhi_cntrl, buf_ring);
		mhi_del_ring_element(mhi_cntrl, tre_ring);
-		if (mhi_chan->pre_alloc) {
-			kfree(buf_info->cb_buf);
-		} else {
-			result.buf_addr = buf_info->cb_buf;
-			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
-		}
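+		/* Hand the pending buffer back to the client */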
+		result.buf_addr = buf_info->cb_buf;
+		mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
}
}
}
EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer);
-int mhi_prepare_for_transfer_autoqueue(struct mhi_device *mhi_dev)
-{
-	return __mhi_prepare_for_transfer(mhi_dev, MHI_CH_INBOUND_ALLOC_BUFS);
-}
-EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer_autoqueue);
-
void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
 * @lpm_notify: The channel master requires low power mode notifications
 * @offload_channel: The client manages the channel completely
 * @doorbell_mode_switch: Channel switches to doorbell mode on M0 transition
- * @auto_queue: Framework will automatically queue buffers for DL traffic
 * @wake-capable: Channel capable of waking up the system
 */
struct mhi_channel_config {
	bool lpm_notify;
	bool offload_channel;
	bool doorbell_mode_switch;
-	bool auto_queue;
	bool wake_capable;
};
 */
int mhi_prepare_for_transfer(struct mhi_device *mhi_dev);
-/**
- * mhi_prepare_for_transfer_autoqueue - Setup UL and DL channels with auto queue
- *                                      buffers for DL traffic
- * @mhi_dev: Device associated with the channels
- *
- * Allocate and initialize the channel context and also issue the START channel
- * command to both channels. Channels can be started only if both host and
- * device execution environments match and channels are in a DISABLED state.
- * The MHI core will automatically allocate and queue buffers for the DL traffic.
- */
-int mhi_prepare_for_transfer_autoqueue(struct mhi_device *mhi_dev);
-
/**
 * mhi_unprepare_from_transfer - Reset UL and DL channels for data transfer.
 *                               Issue the RESET channel command and let the