};
/**
- * ice_pack_txq_ctx - Pack Tx queue context into a HW buffer
+ * ice_pack_txq_ctx - Pack Tx queue context into an Admin Queue buffer
* @ctx: the Tx queue context to pack
- * @buf: the HW buffer to pack into
+ * @buf: the Admin Queue HW buffer to pack into
*
* Pack the Tx queue context from the CPU-friendly unpacked buffer into its
- * bit-packed HW layout.
+ * bit-packed Admin Queue layout.
*/
void ice_pack_txq_ctx(const struct ice_tlan_ctx *ctx, ice_txq_ctx_buf_t *buf)
{
	pack_fields(buf, sizeof(*buf), ctx, ice_tlan_ctx_fields,
		    QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
}
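
/* Usage sketch (illustrative, not part of this patch): the Admin Queue
 * variant packs only the software-visible portion of the context, so a
 * caller fills a CPU-friendly struct ice_tlan_ctx and packs it just
 * before handing the queue configuration to firmware. The helper name
 * and the destination of the packed buffer are hypothetical.
 */
static void ice_pack_txq_ctx_example(const struct ice_tlan_ctx *tlan_ctx)
{
	ice_txq_ctx_buf_t txq_ctx_buf = {};

	ice_pack_txq_ctx(tlan_ctx, &txq_ctx_buf);
	/* txq_ctx_buf would then be copied into the AQ command payload */
}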
+/**
+ * ice_pack_txq_ctx_full - Pack Tx queue context into a HW buffer
+ * @ctx: the Tx queue context to pack
+ * @buf: the HW buffer to pack into
+ *
+ * Pack the Tx queue context from the CPU-friendly unpacked buffer into its
+ * bit-packed HW layout, including the internal data portion.
+ */
+static void ice_pack_txq_ctx_full(const struct ice_tlan_ctx *ctx,
+ ice_txq_ctx_buf_full_t *buf)
+{
+ pack_fields(buf, sizeof(*buf), ctx, ice_tlan_ctx_fields,
+ QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
+}
+
+/**
+ * ice_unpack_txq_ctx_full - Unpack Tx queue context from a HW buffer
+ * @buf: the HW buffer to unpack from
+ * @ctx: the Tx queue context to unpack
+ *
+ * Unpack the Tx queue context from the HW buffer (including the full internal
+ * state) into the CPU-friendly structure.
+ */
+static void ice_unpack_txq_ctx_full(const ice_txq_ctx_buf_full_t *buf,
+ struct ice_tlan_ctx *ctx)
+{
+ unpack_fields(buf, sizeof(*buf), ctx, ice_tlan_ctx_fields,
+ QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
+}
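
/* Usage sketch (illustrative, not part of this patch): the two helpers
 * above are inverses for every field described by ice_tlan_ctx_fields,
 * so a context round-trips through the packed layout intact. The
 * function name and the qlen value are placeholders.
 */
static void ice_txq_ctx_roundtrip_example(void)
{
	struct ice_tlan_ctx in = {}, out = {};
	ice_txq_ctx_buf_full_t buf = {};

	in.qlen = 128;

	ice_pack_txq_ctx_full(&in, &buf);
	ice_unpack_txq_ctx_full(&buf, &out);

	/* every packed field must survive the round trip */
	WARN_ON(out.qlen != in.qlen);
}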
+
+/**
+ * ice_copy_txq_ctx_from_hw - Copy Tx Queue context from HW registers
+ * @hw: pointer to the hardware structure
+ * @txq_ctx: pointer to the packed Tx queue context, including internal state
+ * @txq_index: the index of the Tx queue
+ *
+ * Copy Tx Queue context from HW register space to dense structure
+ */
+static void ice_copy_txq_ctx_from_hw(struct ice_hw *hw,
+ ice_txq_ctx_buf_full_t *txq_ctx,
+ u32 txq_index)
+{
+ struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
+ u32 *ctx = (u32 *)txq_ctx;
+ u32 txq_base, reg;
+
+ /* Get Tx queue base within card space */
+ txq_base = rd32(hw, PFLAN_TX_QALLOC(hw->pf_id));
+ txq_base = FIELD_GET(PFLAN_TX_QALLOC_FIRSTQ_M, txq_base);
+
+ reg = FIELD_PREP(GLCOMM_QTX_CNTX_CTL_CMD_M,
+ GLCOMM_QTX_CNTX_CTL_CMD_READ) |
+ FIELD_PREP(GLCOMM_QTX_CNTX_CTL_QUEUE_ID_M,
+ txq_base + txq_index) |
+ GLCOMM_QTX_CNTX_CTL_CMD_EXEC_M;
+
+ /* Prevent other PFs on the same adapter from accessing the Tx queue
+ * context interface concurrently.
+ */
+ spin_lock(&pf->adapter->txq_ctx_lock);
+
+ wr32(hw, GLCOMM_QTX_CNTX_CTL, reg);
+ ice_flush(hw);
+
+ /* Copy each dword separately from HW */
+ for (int i = 0; i < ICE_TXQ_CTX_FULL_SIZE_DWORDS; i++, ctx++) {
+ *ctx = rd32(hw, GLCOMM_QTX_CNTX_DATA(i));
+
+ ice_debug(hw, ICE_DBG_QCTX, "qtxdata[%d]: %08X\n", i, *ctx);
+ }
+
+ spin_unlock(&pf->adapter->txq_ctx_lock);
+}
+
+/**
+ * ice_copy_txq_ctx_to_hw - Copy Tx Queue context into HW registers
+ * @hw: pointer to the hardware structure
+ * @txq_ctx: pointer to the packed Tx queue context, including internal state
+ * @txq_index: the index of the Tx queue
+ *
+ * Copy Tx Queue context from dense structure to HW register space
+ */
+static void ice_copy_txq_ctx_to_hw(struct ice_hw *hw,
+ const ice_txq_ctx_buf_full_t *txq_ctx,
+ u32 txq_index)
+{
+ struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
+ u32 txq_base, reg;
+
+ /* Get Tx queue base within card space */
+ txq_base = rd32(hw, PFLAN_TX_QALLOC(hw->pf_id));
+ txq_base = FIELD_GET(PFLAN_TX_QALLOC_FIRSTQ_M, txq_base);
+
+ reg = FIELD_PREP(GLCOMM_QTX_CNTX_CTL_CMD_M,
+ GLCOMM_QTX_CNTX_CTL_CMD_WRITE_NO_DYN) |
+ FIELD_PREP(GLCOMM_QTX_CNTX_CTL_QUEUE_ID_M,
+ txq_base + txq_index) |
+ GLCOMM_QTX_CNTX_CTL_CMD_EXEC_M;
+
+ /* Prevent other PFs on the same adapter from accessing the Tx queue
+ * context interface concurrently.
+ */
+ spin_lock(&pf->adapter->txq_ctx_lock);
+
+ /* Copy each dword separately to HW */
+ for (int i = 0; i < ICE_TXQ_CTX_FULL_SIZE_DWORDS; i++) {
+ u32 ctx = ((const u32 *)txq_ctx)[i];
+
+ wr32(hw, GLCOMM_QTX_CNTX_DATA(i), ctx);
+
+ ice_debug(hw, ICE_DBG_QCTX, "qtxdata[%d]: %08X\n", i, ctx);
+ }
+
+ wr32(hw, GLCOMM_QTX_CNTX_CTL, reg);
+ ice_flush(hw);
+
+ spin_unlock(&pf->adapter->txq_ctx_lock);
+}
+
+/**
+ * ice_read_txq_ctx - Read Tx queue context from HW
+ * @hw: pointer to the hardware structure
+ * @tlan_ctx: pointer to the Tx queue context
+ * @txq_index: the index of the Tx queue
+ *
+ * Read the Tx queue context from the HW registers, then unpack it into the
+ * ice_tlan_ctx structure for use.
+ *
+ * Returns: 0 on success, or -EINVAL on an invalid Tx queue index.
+ */
+int ice_read_txq_ctx(struct ice_hw *hw, struct ice_tlan_ctx *tlan_ctx,
+ u32 txq_index)
+{
+ ice_txq_ctx_buf_full_t buf = {};
+
+ if (txq_index > QTX_COMM_HEAD_MAX_INDEX)
+ return -EINVAL;
+
+ ice_copy_txq_ctx_from_hw(hw, &buf, txq_index);
+ ice_unpack_txq_ctx_full(&buf, tlan_ctx);
+
+ return 0;
+}
+
+/**
+ * ice_write_txq_ctx - Write Tx queue context to HW
+ * @hw: pointer to the hardware structure
+ * @tlan_ctx: pointer to the Tx queue context
+ * @txq_index: the index of the Tx queue
+ *
+ * Pack the Tx queue context into the dense HW layout, then write it into the
+ * HW registers.
+ *
+ * Returns: 0 on success, or -EINVAL on an invalid Tx queue index.
+ */
+int ice_write_txq_ctx(struct ice_hw *hw, struct ice_tlan_ctx *tlan_ctx,
+ u32 txq_index)
+{
+ ice_txq_ctx_buf_full_t buf = {};
+
+ if (txq_index > QTX_COMM_HEAD_MAX_INDEX)
+ return -EINVAL;
+
+ ice_pack_txq_ctx_full(tlan_ctx, &buf);
+ ice_copy_txq_ctx_to_hw(hw, &buf, txq_index);
+
+ return 0;
+}
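
/* Usage sketch (illustrative, not part of this patch): together the two
 * accessors support a read-modify-write of a live queue's context. The
 * helper name and the field being updated are hypothetical.
 */
static int ice_txq_ctx_rmw_example(struct ice_hw *hw, u32 txq_index)
{
	struct ice_tlan_ctx tlan_ctx = {};
	int err;

	err = ice_read_txq_ctx(hw, &tlan_ctx, txq_index);
	if (err)
		return err;

	tlan_ctx.quanta_prof_idx = 1;	/* hypothetical new value */

	return ice_write_txq_ctx(hw, &tlan_ctx, txq_index);
}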
+
/* Sideband Queue command wrappers */
/**
#define GLCOMM_QUANTA_PROF_MAX_DESC_M ICE_M(0x3F, 24)
#define QTX_COMM_DBELL(_DBQM) (0x002C0000 + ((_DBQM) * 4))
#define QTX_COMM_HEAD(_DBQM) (0x000E0000 + ((_DBQM) * 4))
+#define QTX_COMM_HEAD_MAX_INDEX 16383
#define QTX_COMM_HEAD_HEAD_S 0
#define QTX_COMM_HEAD_HEAD_M ICE_M(0x1FFF, 0)
#define PF_FW_ARQBAH 0x00080180
#define VPINT_ALLOC_PCI_VALID_M BIT(31)
#define VPINT_MBX_CTL(_VSI) (0x0016A000 + ((_VSI) * 4))
#define VPINT_MBX_CTL_CAUSE_ENA_M BIT(30)
+#define PFLAN_TX_QALLOC(_PF) (0x001D2580 + ((_PF) * 4))
+#define PFLAN_TX_QALLOC_FIRSTQ_M GENMASK(13, 0)
#define GLLAN_RCTL_0 0x002941F8
#define QRX_CONTEXT(_i, _QRX) (0x00280000 + ((_i) * 8192 + (_QRX) * 4))
#define QRX_CTRL(_QRX) (0x00120000 + ((_QRX) * 4))
#define GLNVM_ULD_POR_DONE_1_M BIT(8)
#define GLNVM_ULD_PCIER_DONE_2_M BIT(9)
#define GLNVM_ULD_PE_DONE_M BIT(10)
+#define GLCOMM_QTX_CNTX_CTL 0x002D2DC8
+#define GLCOMM_QTX_CNTX_CTL_QUEUE_ID_M GENMASK(13, 0)
+#define GLCOMM_QTX_CNTX_CTL_CMD_M GENMASK(18, 16)
+#define GLCOMM_QTX_CNTX_CTL_CMD_READ 0
+#define GLCOMM_QTX_CNTX_CTL_CMD_WRITE 1
+#define GLCOMM_QTX_CNTX_CTL_CMD_RESET 3
+#define GLCOMM_QTX_CNTX_CTL_CMD_WRITE_NO_DYN 4
+#define GLCOMM_QTX_CNTX_CTL_CMD_EXEC_M BIT(19)
+#define GLCOMM_QTX_CNTX_DATA(_i) (0x002D2D40 + ((_i) * 4))
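
/* Illustrative only (assumption, mirroring the READ/WRITE_NO_DYN usage
 * in ice_common.c): the other command encodings would compose the same
 * way, e.g. a context reset for absolute queue 'q':
 *
 *	reg = FIELD_PREP(GLCOMM_QTX_CNTX_CTL_CMD_M,
 *			 GLCOMM_QTX_CNTX_CTL_CMD_RESET) |
 *	      FIELD_PREP(GLCOMM_QTX_CNTX_CTL_QUEUE_ID_M, q) |
 *	      GLCOMM_QTX_CNTX_CTL_CMD_EXEC_M;
 */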
#define GLPCI_CNF2 0x000BE004
#define GLPCI_CNF2_CACHELINE_SIZE_M BIT(1)
#define PF_FUNC_RID 0x0009E880