struct igc_fpe_t {
struct ethtool_mmsv mmsv;
u32 tx_min_frag_size;
+ bool tx_enabled; /* true when the mmsv framework has enabled preemption Tx */
};
struct igc_ring {
bool launchtime_enable; /* true if LaunchTime is enabled */
ktime_t last_tx_cycle; /* end of the cycle with a launchtime transmission */
ktime_t last_ff_cycle; /* Last cycle with an active first flag */
+ bool preemptible; /* True if preemptible queue, false if express queue */
u32 start_time;
u32 end_time;
#define IGC_TXDCTL_WTHRESH_MASK GENMASK(20, 16)
#define IGC_TXDCTL_QUEUE_ENABLE_MASK GENMASK(25, 25)
#define IGC_TXDCTL_SWFLUSH_MASK GENMASK(26, 26)
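+/* Tx queue priority: set for express queues, cleared for preemptible queues
+ * while frame preemption Tx is active
+ */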
+#define IGC_TXDCTL_PRIORITY_MASK GENMASK(27, 27)
+
#define IGC_TXDCTL_PTHRESH(x) FIELD_PREP(IGC_TXDCTL_PTHRESH_MASK, (x))
#define IGC_TXDCTL_HTHRESH(x) FIELD_PREP(IGC_TXDCTL_HTHRESH_MASK, (x))
#define IGC_TXDCTL_WTHRESH(x) FIELD_PREP(IGC_TXDCTL_WTHRESH_MASK, (x))
#define IGC_TXDCTL_QUEUE_ENABLE FIELD_PREP(IGC_TXDCTL_QUEUE_ENABLE_MASK, 1)
/* Transmit Software Flush */
#define IGC_TXDCTL_SWFLUSH FIELD_PREP(IGC_TXDCTL_SWFLUSH_MASK, 1)
+#define IGC_TXDCTL_PRIORITY(x) FIELD_PREP(IGC_TXDCTL_PRIORITY_MASK, (x))
+#define IGC_TXDCTL_PRIORITY_HIGH IGC_TXDCTL_PRIORITY(1)
#define IGC_RX_DMA_ATTR \
(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
first->tx_flags = tx_flags;
first->protocol = protocol;
+ /* For preemptible queue, manually pad the skb so that HW includes
+ * padding bytes in mCRC calculation
+ */
+ if (tx_ring->preemptible && skb->len < ETH_ZLEN) {
+ if (skb_padto(skb, ETH_ZLEN))
+ goto out_drop;
+ skb_put(skb, ETH_ZLEN - skb->len);
+ }
+
tso = igc_tso(tx_ring, first, launch_time, first_flag, &hdr_len);
if (tso < 0)
goto out_drop;
ring->start_time = 0;
ring->end_time = NSEC_PER_SEC;
ring->max_sdu = 0;
+ ring->preemptible = false;
}
spin_lock_irqsave(&adapter->qbv_tx_lock, flags);
if (!validate_schedule(adapter, qopt))
return -EINVAL;
- /* preemptible isn't supported yet */
- if (qopt->mqprio.preemptible_tcs)
- return -EOPNOTSUPP;
+ if (qopt->mqprio.preemptible_tcs &&
+ !(adapter->flags & IGC_FLAG_TSN_REVERSE_TXQ_PRIO)) {
+ NL_SET_ERR_MSG_MOD(qopt->extack,
+ "reverse-tsn-txq-prio private flag must be enabled before setting preemptible tc");
+ return -ENODEV;
+ }
igc_ptp_read(adapter, &now);
ring->max_sdu = 0;
}
+ igc_fpe_save_preempt_queue(adapter, &qopt->mqprio);
+
return 0;
}
return err;
}
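+/* Callback from the ethtool MAC Merge software verification (mmsv) framework.
+ * Cache the new preemption Tx state and reapply the TSN offload so the
+ * per-queue preemptible/priority configuration follows it.
+ */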
+static void igc_fpe_configure_tx(struct ethtool_mmsv *mmsv, bool tx_enable)
+{
+ struct igc_fpe_t *fpe = container_of(mmsv, struct igc_fpe_t, mmsv);
+ struct igc_adapter *adapter;
+
+ adapter = container_of(fpe, struct igc_adapter, fpe);
+ adapter->fpe.tx_enabled = tx_enable;
+
+ /* Update config since tx_enabled affects preemptible queue configuration */
+ igc_tsn_offload_apply(adapter);
+}
+
static void igc_fpe_send_mpacket(struct ethtool_mmsv *mmsv,
enum ethtool_mpacket type)
{
}
static const struct ethtool_mmsv_ops igc_mmsv_ops = {
+ .configure_tx = igc_fpe_configure_tx,
.send_mpacket = igc_fpe_send_mpacket,
};
void igc_fpe_init(struct igc_adapter *adapter)
{
adapter->fpe.tx_min_frag_size = TX_MIN_FRAG_SIZE;
+ adapter->fpe.tx_enabled = false;
ethtool_mmsv_init(&adapter->fpe.mmsv, adapter->netdev, &igc_mmsv_ops);
}
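+/* Translate the bitmask of preemptible traffic classes into a bitmask of Tx
+ * queues using the netdev TC-to-queue mapping.
+ */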
+static u32 igc_fpe_map_preempt_tc_to_queue(const struct igc_adapter *adapter,
+ unsigned long preemptible_tcs)
+{
+ struct net_device *dev = adapter->netdev;
+ u32 i, queue = 0;
+
+ for (i = 0; i < dev->num_tc; i++) {
+ u32 offset, count;
+
+ if (!(preemptible_tcs & BIT(i)))
+ continue;
+
+ offset = dev->tc_to_txq[i].offset;
+ count = dev->tc_to_txq[i].count;
+ queue |= GENMASK(offset + count - 1, offset);
+ }
+
+ return queue;
+}
+
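+/* Record, per Tx ring, whether the queue was requested as preemptible by the
+ * offload. The hardware bits are programmed in the TSN offload path and only
+ * take effect while preemption Tx is active.
+ */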
+void igc_fpe_save_preempt_queue(struct igc_adapter *adapter,
+ const struct tc_mqprio_qopt_offload *mqprio)
+{
+ u32 preemptible_queue = igc_fpe_map_preempt_tc_to_queue(adapter,
+ mqprio->preemptible_tcs);
+
+ for (int i = 0; i < adapter->num_tx_queues; i++) {
+ struct igc_ring *tx_ring = adapter->tx_ring[i];
+
+ tx_ring->preemptible = !!(preemptible_queue & BIT(i));
+ }
+}
+
static bool is_any_launchtime(struct igc_adapter *adapter)
{
int i;
wr32(IGC_TQAVCTRL, tqavctrl);
for (i = 0; i < adapter->num_tx_queues; i++) {
+ int reg_idx = adapter->tx_ring[i]->reg_idx;
+ u32 txdctl;
+
wr32(IGC_TXQCTL(i), 0);
wr32(IGC_STQT(i), 0);
wr32(IGC_ENDQT(i), NSEC_PER_SEC);
+
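+ /* Restore the default Tx queue priority */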
+ txdctl = rd32(IGC_TXDCTL(reg_idx));
+ txdctl &= ~IGC_TXDCTL_PRIORITY_HIGH;
+ wr32(IGC_TXDCTL(reg_idx), txdctl);
}
wr32(IGC_QBVCYCLET_S, 0);
for (i = 0; i < adapter->num_tx_queues; i++) {
struct igc_ring *ring = adapter->tx_ring[i];
+ u32 txdctl = rd32(IGC_TXDCTL(ring->reg_idx));
u32 txqctl = 0;
u16 cbs_value;
u32 tqavcc;
if (ring->launchtime_enable)
txqctl |= IGC_TXQCTL_QUEUE_MODE_LAUNCHT;
+ if (!adapter->fpe.tx_enabled) {
+ /* fpe inactive: clear both flags */
+ txqctl &= ~IGC_TXQCTL_PREEMPTIBLE;
+ txdctl &= ~IGC_TXDCTL_PRIORITY_HIGH;
+ } else if (ring->preemptible) {
+ /* fpe active + preemptible: enable preemptible queue + set low priority */
+ txqctl |= IGC_TXQCTL_PREEMPTIBLE;
+ txdctl &= ~IGC_TXDCTL_PRIORITY_HIGH;
+ } else {
+ /* fpe active + express: enable express queue + set high priority */
+ txqctl &= ~IGC_TXQCTL_PREEMPTIBLE;
+ txdctl |= IGC_TXDCTL_PRIORITY_HIGH;
+ }
+
+ wr32(IGC_TXDCTL(ring->reg_idx), txdctl);
+
/* Skip configuring CBS for Q2 and Q3 */
if (i > 1)
goto skip_cbs;