struct ena_admin_rss_ind_table_entry inline_entry;
};
-/* When hint value is 0, driver should use it's own predefined value */
+/* When hint value is 0, driver should use its own predefined value */
struct ena_admin_ena_hw_hints {
/* value in ms */
u16 mmio_read_timeout;
unsigned int dma_desc_align_size = dma_get_cache_alignment();
int err;
- /* Setup paramaters for syncing RX/TX DMA descriptors */
+ /* Setup parameters for syncing RX/TX DMA descriptors */
dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
err = b44_pci_init();
}
}
-/* maps unmapped priorities to to the same COS as L2 */
+/* maps unmapped priorities to the same COS as L2 */
static void bnx2x_dcbx_map_nw(struct bnx2x *bp)
{
int i;
* pf B succeeds in taking the same lock since they are from the same port.
* pf A takes the per pf misc lock. Performs eeprom access.
* pf A finishes. Unlocks the per pf misc lock.
- * Pf B takes the lock and proceeds to perform it's own access.
+ * pf B takes the lock and proceeds to perform its own access.
* pf A unlocks the per port lock, while pf B is still working (!).
- * mcp takes the per port lock and corrupts pf B's access (and/or has it's own
+ * mcp takes the per port lock and corrupts pf B's access (and/or has its own
* access corrupted by pf B)
*/
static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
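/* A minimal sketch of the locking discipline the scenario above calls
 * for (bp->nvram_lock and the exact helper names are illustrative, not
 * necessarily the driver's actual fields): hold a per-pf software lock
 * across the whole eeprom access, and nest the shared per-port HW lock
 * inside it so it is never released while another pf is mid-access.
 */
static int nvram_access_serialized(struct bnx2x *bp)
{
	int rc;

	mutex_lock(&bp->nvram_lock);		/* hypothetical per-pf lock */
	rc = bnx2x_acquire_nvram_lock(bp);	/* shared per-port HW lock */
	if (!rc) {
		/* ... perform the eeprom read/write here ... */
		bnx2x_release_nvram_lock(bp);
	}
	mutex_unlock(&bp->nvram_lock);
	return rc;
}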
#define TOE_STATE (TOE_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
#define RDMA_STATE (RDMA_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
-/* microcode fixed page page size 4K (chains and ring segments) */
+/* microcode fixed page size 4K (chains and ring segments) */
#define MC_PAGE_SIZE 4096
/* Number of indices per slow-path SB */
* @bp: driver handle
*
* Returns the recovery leader resource id according to the engine this function
- * belongs to. Currently only only 2 engines is supported.
+ * belongs to. Currently only 2 engines are supported.
*/
static int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
{
/**
* Delete all configured elements having the given
* vlan_mac_flags specification. Assumes no pending for
- * execution commands. Will schedule all all currently
+ * execution commands. Will schedule all currently
* configured MACs/VLANs/VLAN-MACs matching the vlan_mac_flags
* specification for deletion and will use the given
* ramrod_flags for the last DEL operation.
bnxt_free_ctx_mem(bp, false);
netdev_unlock(netdev);
- /* Request a slot slot reset. */
+ /* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
}
/* There are two cases:
 * 1. If firmware spec < 0x10202, VF MAC address is not forwarded
* to the PF and so it doesn't have to match
- * 2.Allow VF to modify it's own MAC when PF has not assigned a
+ * 2.Allow VF to modify its own MAC when PF has not assigned a
* valid MAC address and firmware spec >= 0x10202
*/
mac_ok = true;
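/* Sketch of the two cases above (hypothetical helper shape): the
 * VF-supplied MAC only has to match the PF-assigned one when the
 * firmware is new enough to forward it and the PF actually assigned
 * a valid address.
 */
static bool vf_mac_ok(bool fw_ge_10202, bool pf_assigned_valid_mac,
		      bool matches_pf_mac)
{
	if (!fw_ge_10202)
		return true;	/* case 1: PF never sees the VF MAC */
	if (!pf_assigned_valid_mac)
		return true;	/* case 2: VF may pick its own MAC */
	return matches_pf_mac;
}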
tg3_flag_set(tp, PCIX_TARGET_HWBUG);
- /* The chip can have it's power management PCI config
+ /* The chip can have its power management PCI config
* space registers clobbered due to this bug.
* So explicitly force the chip into D0 here.
*/
response of the request.
* 0: the request will wait until its response gets back
* from the firmware within LIO_SC_MAX_TMO_MS milli sec.
- * It the response does not return within
+ * If the response does not return within
* LIO_SC_MAX_TMO_MS milli sec, lio_process_ordered_list()
* will move the request to zombie response list.
*
* @param oct - octeon device pointer
* @param ndata - control structure with queueing, and buffer information
*
- * @returns IQ_FAILED if it failed to add to the input queue. IQ_STOP if it the
+ * @returns IQ_FAILED if it failed to add to the input queue. IQ_STOP if the
* queue should be stopped, and IQ_SEND_OK if it sent okay.
*/
int octnet_send_nic_data_pkt(struct octeon_device *oct,
/** Send a NIC control packet to the device
* @param oct - octeon device pointer
 * @param nctrl - control structure with command, timeout, and callback info
- * @returns IQ_FAILED if it failed to add to the input queue. IQ_STOP if it the
+ * @returns IQ_FAILED if it failed to add to the input queue. IQ_STOP if the
* queue should be stopped, and IQ_SEND_OK if it sent okay.
*/
int
pmwrite(cmac, SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE,
0 /*SUNI1x10GEXP_BITMSK_TOP_INTE */ );
- /* TERMINATOR - PL_INTERUPTS_EXT */
+ /* TERMINATOR - PL_INTERRUPTS_EXT */
pl_intr = readl(cmac->adapter->regs + A_PL_ENABLE);
pl_intr |= F_PL_INTR_EXT;
writel(pl_intr, cmac->adapter->regs + A_PL_ENABLE);
elmer &= ~ELMER0_GP_BIT1;
t1_tpi_write(cmac->adapter, A_ELMER0_INT_ENABLE, elmer);
- /* TERMINATOR - PL_INTERUPTS_EXT */
+ /* TERMINATOR - PL_INTERRUPTS_EXT */
/* DO NOT DISABLE TERMINATOR's EXTERNAL INTERRUPTS. ANOTHER CHIP
* COULD WANT THEM ENABLED. We disable PM3393 at the ELMER level.
*/
elmer |= ELMER0_GP_BIT1;
t1_tpi_write(cmac->adapter, A_ELMER0_INT_CAUSE, elmer);
- /* TERMINATOR - PL_INTERUPTS_EXT
+ /* TERMINATOR - PL_INTERRUPTS_EXT
*/
pl_intr = readl(cmac->adapter->regs + A_PL_CAUSE);
pl_intr |= F_PL_INTR_EXT;
/* ??? If this fails, might be able to software reset the XAUI part
* and try to recover... thus saving us from doing another HW reset */
- /* Has the XAUI MABC PLL circuitry stablized? */
+ /* Has the XAUI MABC PLL circuitry stabilized? */
is_xaui_mabc_pll_locked =
(val & SUNI1x10GEXP_BITMSK_TOP_SXRA_EXPIRED);
* (value, mask) tuples. The associated ingress packet field matches the
* tuple when ((field & mask) == value). (Thus a wildcard "don't care" field
* rule can be constructed by specifying a tuple of (0, 0).) A filter rule
- * matches an ingress packet when all of the individual individual field
+ * matches an ingress packet when all of the individual field
* matching rules are true.
*
* Partial field masks are always valid, however, while it may be easy to
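/* A minimal sketch of the matching rule described above: a field
 * matches its (value, mask) tuple when (field & mask) == value, and
 * (0, 0) acts as a wildcard since any field value satisfies it.
 */
static bool field_matches(u32 field, u32 value, u32 mask)
{
	return (field & mask) == value;
}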
}
if (max_tx_rate == 0) {
- /* unbind VF to to any Traffic Class */
+ /* unbind VF to any Traffic Class */
fw_pfvf =
(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH));
goto bye;
}
- /* Get FW from from /lib/firmware/ */
+ /* Get FW from /lib/firmware/ */
ret = request_firmware(&fw, fw_info->fw_mod_name,
adap->pdev_dev);
if (ret < 0) {
link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);
/* Ensure that uhtid is either root u32 (i.e. 0x800)
- * or a a valid linked bucket.
+ * or a valid linked bucket.
*/
if (uhtid != 0x800 && uhtid >= t->size)
return -EINVAL;
uhtid = TC_U32_USERHTID(cls->knode.handle);
/* Ensure that uhtid is either root u32 (i.e. 0x800)
- * or a a valid linked bucket.
+ * or a valid linked bucket.
*/
if (uhtid != 0x800 && uhtid >= t->size)
return -EINVAL;
* for DMA, but this is of course never sent to the hardware and is only used
* to prevent double unmappings. All of the above requires that the Free List
* Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
- * 32-byte or or a power of 2 greater in alignment. Since the SGE's minimal
+ * 32-byte or a power of 2 greater in alignment. Since the SGE's minimal
* Free List Buffer alignment is 32 bytes, this works out for us ...
*/
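/* Illustrative check of the requirement above: the bottom 5 bits of a
 * Free List Buffer address must be zero, i.e. the buffer is aligned
 * to 32 bytes or a larger power of 2.
 */
static inline bool fl_buf_is_aligned(dma_addr_t addr)
{
	return (addr & 0x1f) == 0;
}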
enum {
return 0;
}
- /* Otherwise, ask the firmware for it's Device Log Parameters.
+ /* Otherwise, ask the firmware for its Device Log Parameters.
*/
memset(&devlog_cmd, 0, sizeof(devlog_cmd));
devlog_cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_DEVLOG_CMD) |
/**
* t4vf_sge_alloc_rxq - allocate an SGE RX Queue
* @adapter: the adapter
- * @rspq: pointer to to the new rxq's Response Queue to be filled in
+ * @rspq: pointer to the new rxq's Response Queue to be filled in
* @iqasynch: if 0, a normal rspq; if 1, an asynchronous event queue
* @dev: the network device associated with the new rspq
 * @intr_dest: MSI-X vector index (overridden in MSI mode)
* separately. The actual Ingress Packet Data alignment boundary
* within Packed Buffer Mode is the maximum of these two
* specifications. (Note that it makes no real practical sense to
- * have the Pading Boudary be larger than the Packing Boundary but you
+ * have the Padding Boundary be larger than the Packing Boundary but you
* could set the chip up that way and, in fact, legacy T4 code would
 * end up doing this because it would initialize the Padding Boundary and
* leave the Packing Boundary initialized to 0 (16 bytes).)
(PCI_SLOT(pdev->devfn) == 12))) {
/* Cobalt MAC address in first EEPROM locations. */
sa_offset = 0;
- /* Ensure our media table fixup get's applied */
+ /* Ensure our media table fixup gets applied */
memcpy(ee_data + 16, ee_data, 8);
}
#endif
/* Disable all interrupts */
iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
- /* Release phy lock to allow ftgmac100_reset to aquire it, keeping lock
+ /* Release phy lock to allow ftgmac100_reset to acquire it, keeping lock
 * order consistent to prevent deadlock.
*/
if (netdev->phydev)
*/
if (test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state) && num &&
!ring->pending_buf && num <= HNS3_MAX_PUSH_BD_NUM && doorbell) {
- /* This smp_store_release() pairs with smp_load_aquire() in
+ /* This smp_store_release() pairs with smp_load_acquire() in
* hns3_nic_reclaim_desc(). Ensure that the BD valid bit
* is updated.
*/
return;
}
- /* This smp_store_release() pairs with smp_load_aquire() in
+ /* This smp_store_release() pairs with smp_load_acquire() in
* hns3_nic_reclaim_desc(). Ensure that the BD valid bit is updated.
*/
smp_store_release(&ring->last_to_use, ring->next_to_use);
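/* Sketch of the reclaim side this release pairs with (field names
 * assumed, not the exact hns3 code): the smp_load_acquire() ensures
 * the BD writes made before the smp_store_release() above are visible
 * here before last_to_use is observed.
 */
static bool ring_has_work(struct hns3_enet_ring *ring)
{
	int ltu = smp_load_acquire(&ring->last_to_use);

	return ltu != ring->next_to_clean;
}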
desc_num = reg_num / HCLGE_REG_NUM_PER_DESC + 1;
/* This may be called inside atomic sections,
- * so GFP_ATOMIC is more suitalbe here
+ * so GFP_ATOMIC is more suitable here
*/
desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
if (!desc)
/**
* ceq_elements_init - Initialize all the elements in the ceq
* @eq: the event queue
- * @init_val: value to init with it the elements
+ * @init_val: value to init the elements with
**/
static void ceq_elements_init(struct hinic_eq *eq, u32 init_val)
{
HINIC_MBOX_HEADER_SET(NOT_LAST_SEG, LAST) |
HINIC_MBOX_HEADER_SET(direction, DIRECTION) |
HINIC_MBOX_HEADER_SET(cmd, CMD) |
- /* The vf's offset to it's associated pf */
+ /* The vf's offset to its associated pf */
HINIC_MBOX_HEADER_SET(msg_info->msg_id, MSG_ID) |
HINIC_MBOX_HEADER_SET(msg_info->status, STATUS) |
HINIC_MBOX_HEADER_SET(hinic_global_func_id_hw(hwdev->hwif),
pins.gpio_4 = pf->ptp_pins->gpio_4;
/* To turn on the pin - find the corresponding one based on
- * the given index. To to turn the function off - find
+ * the given index. To turn the function off - find
* which pin had it assigned. Don't use ptp_find_pin here
* because it tries to lock the pincfg_mux which is locked by
* ptp_pin_store() that calls here.
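/* Sketch of the open-coded lookup this implies (treat as illustrative,
 * though ptp_clock_info's pin_config entries do carry func/chan): scan
 * the pin table directly because the caller already holds the
 * pincfg_mux that ptp_find_pin() would try to take.
 */
static int find_pin_locked(struct ptp_clock_info *info,
			   enum ptp_pin_function func, unsigned int chan)
{
	int i;

	for (i = 0; i < info->n_pins; i++)
		if (info->pin_config[i].func == func &&
		    info->pin_config[i].chan == chan)
			return i;
	return -1;
}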
* struct ice_dynamic_port - Track dynamically added devlink port instance
* @hw_addr: the HW address for this port
* @active: true if the port has been activated
- * @attached: true it the prot is attached
+ * @attached: true if the port is attached
* @devlink_port: the associated devlink port structure
* @pf: pointer to the PF private structure
* @vsi: the VSI associated with this port
return ring->q_index - ring->ch->base_q;
/* Idea here for calculation is that we subtract the number of queue
- * count from TC that ring belongs to from it's absolute queue index
+ * count from TC that ring belongs to from its absolute queue index
* and as a result we get the queue's index within TC.
*/
return ring->q_index - vsi->tc_cfg.tc_info[tc].qoffset;
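/* Worked example of the subtraction above (illustrative numbers): if
 * TC1's queues start at absolute index 8 (qoffset == 8), the ring with
 * q_index 11 is queue 11 - 8 = 3 within TC1.
 */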
if (!netdev)
return;
- /* CHNL VSI doesn't have it's own netdev, hence, no netdev_tc */
+ /* CHNL VSI doesn't have its own netdev, hence, no netdev_tc */
if (vsi->type == ICE_VSI_CHNL)
return;
 * |    8 bits     | |    32 bits    |
* +---------------+ +---------------+
*
- * The increment value is added to the GLSTYN_TIME_R and GLSTYN_TIME_L
+ * The increment value is added to the GLTSYN_TIME_R and GLTSYN_TIME_L
* registers every clock source tick. Depending on the specific device
* configuration, the clock source frequency could be one of a number of
* values.
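/* Illustrative arithmetic (the clock rate here is an assumed example):
 * with an 800 MHz source, each tick must advance the timer by
 * 1000000000 / 800000000 = 1.25 ns, so the increment register is
 * programmed with 1.25 ns in the device's fixed-point format; a
 * different source frequency simply scales that value.
 */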
goto out;
/* If requested flow control is set to default, set flow control
- * to the both 'rx' and 'tx' pause frames.
+ * to both 'rx' and 'tx' pause frames.
*/
if (hw->fc.requested_mode == igc_fc_default)
hw->fc.requested_mode = igc_fc_full;
memset(msgbuf, 0, sizeof(msgbuf));
/* If index is one then this is the start of a new list and needs
- * indication to the PF so it can do it's own list management.
+ * indication to the PF so it can do its own list management.
* If it is zero then that tells the PF to just clear all of
* this VF's macvlans and there is no new list.
*/
/* Packet size */
int pkt_size;
- /* Size of the buffer acces through DMA*/
+ /* Size of the buffer access through DMA */
u32 buf_size;
/* BPPE virtual base address */
int err = 0;
u64 val;
- /* Check if PF_FUNC wants to use it's own local memory as LMTLINE
+ /* Check if PF_FUNC wants to use its own local memory as LMTLINE
* region, if so, convert that IOVA to physical address and
* populate LMT table with that address
*/
(ltdefs->rx_apad1.ltype_match << 4) |
ltdefs->rx_apad1.ltype_mask);
- /* Receive ethertype defination register defines layer
+ /* Receive ethertype definition register defines layer
* information in NPC_RESULT_S to identify the Ethertype
* location in L2 header. Used for Ethertype overwriting
* in inline IPsec flow.
int work_done = 0;
/*
- * We call txq_reclaim every time since in NAPI interupts are disabled
- * and due to this we miss the TX_DONE interrupt,which is not updated in
- * interrupt status register.
+	 * We call txq_reclaim every time since interrupts are disabled in
+	 * NAPI and due to this we miss the TX_DONE interrupt, which is not
+	 * updated in the interrupt status register.
*/
txq_reclaim(dev, 0);
if (netif_queue_stopped(dev)
enum mlx5e_dma_map_type type;
};
-/* Keep this enum consistent with with the corresponding strings array
+/* Keep this enum consistent with the corresponding strings array
* declared in en/reporter_tx.c
*/
enum {
standalone = ch->rx_count + ch->tx_count;
/* Limits for standalone queues:
- * - each queue has it's own NAPI (num_napi >= rx + tx + combined)
+ * - each queue has its own NAPI (num_napi >= rx + tx + combined)
* - combining queues (combined not 0, rx or tx must be 0)
*/
if ((ch->rx_count && ch->tx_count && ch->combined_count) ||
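/* Minimal sketch of the two limits above (standalone field names are
 * assumed): mixing standalone rx/tx queues with combined ones is
 * rejected, and there must be one NAPI context per queue.
 */
static bool channels_config_ok(u32 rx, u32 tx, u32 combined, u32 num_napi)
{
	if (rx && tx && combined)
		return false;			/* can't combine and split at once */
	return num_napi >= rx + tx + combined;	/* one NAPI per queue */
}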
/* When running in DMA Mode the RX interrupt is not enabled in
	   timberdale because RX data is received by DMA callbacks;
it must still be enabled in the KS8842 because it indicates
- to timberdale when there is RX data for it's DMA FIFOs */
+ to timberdale when there is RX data for its DMA FIFOs */
iowrite16(ENABLED_IRQS_DMA_IP, adapter->hw_addr + REG_TIMB_IER);
ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER);
} else {
/*
* rx_traffic_int reg is an R1 register, writing all 1's
* will ensure that the actual interrupt causing bit
- * get's cleared and hence a read can be avoided.
+ * gets cleared and hence a read can be avoided.
*/
if (reason & GEN_INTR_RXTRAFFIC)
writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
/*
* tx_traffic_int reg is an R1 register, writing all 1's
- * will ensure that the actual interrupt causing bit get's
+ * will ensure that the actual interrupt causing bit gets
* cleared and hence a read can be avoided.
*/
if (reason & GEN_INTR_TXTRAFFIC)
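/* Note on the pattern above: S2IO_MINUS_ONE is the all-ones value, so
 * a single write acknowledges every pending cause bit of these
 * write-1-to-clear (R1) registers without a read-modify-write cycle.
 */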
* first IPv4 header. If the receive packet
* contains both a tunnel IPv4 header and a
* transport IPv4 header, the device validates the
- * checksum for the both IPv4 headers.
+ * checksum for both IPv4 headers.
*
* IONIC_RXQ_COMP_CSUM_F_IP_BAD:
* The IPv4 checksum calculated by the device did
}
/* CID map / ILT shadow table / T2
- * The talbes sizes are determined by the computations above
+ * The table sizes are determined by the computations above
*/
rc = qed_cxt_tables_alloc(p_hwfn);
if (rc)
} else if (ppb == 1) {
		/* This is a special case as it's the only value which wouldn't
		 * fit in an s64 variable. In order to prevent castings, simply
- * handle it seperately.
+ * handle it separately.
*/
best_val = 4;
best_period = 0xee6b27f;
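/* Worked check of the constants above (interpretation assumed):
 * 0xee6b27f is 249,999,999, i.e. 10^9 / 4 - 1, matching best_val = 4;
 * ppb == 1 is special-cased because computing this pair through the
 * generic path would overflow an s64.
 */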
"Remote error detected. Calling ql_port_start()\n");
/*
* ql_port_start() is shared code and needs
- * to lock the PHY on it's own.
+ * to lock the PHY on its own.
*/
ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
if (ql_port_start(qdev)) /* Restart port */
dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__);
}
-/* POST FW related definations*/
+/* POST FW related definitions */
#define QLC_83XX_POST_SIGNATURE_REG 0x41602014
#define QLC_83XX_POST_MODE_REG 0x41602018
#define QLC_83XX_POST_FAST_MODE 0
goto error_put_device;
}
- /* v2 SGMII has a per-lane digital digital, so parse it if it exists */
+ /* v2 SGMII has a per-lane digital, so parse it if it exists */
res = platform_get_resource(sgmii_pdev, IORESOURCE_MEM, 1);
if (res) {
phy->digital = ioremap(res->start, resource_size(res));
/* MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS
* Get descriptions for a set of sensors, specified as an array of sensor
* handles as returned by MC_CMD_DYNAMIC_SENSORS_LIST. Any handles which do not
- * correspond to a sensor currently managed by the MC will be dropped from from
+ * correspond to a sensor currently managed by the MC will be dropped from
* the response. This may happen when a sensor table update is in progress, and
* effectively means the set of usable sensors is the intersection between the
* sets of sensors known to the driver and the MC. On Riverhead this command is
* broken sensor, then the state of the response's MC_CMD_DYNAMIC_SENSORS_VALUE
* entry will be set to BROKEN, and any value provided should be treated as
* erroneous. Any handles which do not correspond to a sensor currently managed
- * by the MC will be dropped from from the response. This may happen when a
+ * by the MC will be dropped from the response. This may happen when a
* sensor table update is in progress, and effectively means the set of usable
* sensors is the intersection between the sets of sensors known to the driver
* and the MC. On Riverhead this command is implemented as a wrapper for
* the named interface itself - INTF=..., PF=..., VF=VF_NULL to refer to a PF
* on a named interface - INTF=..., PF=..., VF=... to refer to a VF on a named
* interface where ... refers to a small integer for the VF/PF fields, and to
- * values from the PCIE_INTERFACE enum for for the INTF field. It's only
+ * values from the PCIE_INTERFACE enum for the INTF field. It's only
* meaningful to use INTF=CALLER within a structure that's an argument to
* MC_CMD_DEVEL_GET_CLIENT_HANDLE.
*/
if (efx->vf_count > vf_limit) {
netif_err(efx, probe, efx->net_dev,
- "Reducing VF count from from %d to %d\n",
+ "Reducing VF count from %d to %d\n",
efx->vf_count, vf_limit);
efx->vf_count = vf_limit;
}
#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
-/* interpretation is is sensor-specific. */
+/* interpretation is sensor-specific. */
#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4
#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_LEN 4
-/* interpretation is is sensor-specific. */
+/* interpretation is sensor-specific. */
#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8
#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_LEN 4
-/* interpretation is is sensor-specific. */
+/* interpretation is sensor-specific. */
#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12
#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_LEN 4
-/* interpretation is is sensor-specific. */
+/* interpretation is sensor-specific. */
#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16
#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_LEN 4
* handles as returned by MC_CMD_DYNAMIC_SENSORS_LIST
*
* Any handles which do not correspond to a sensor currently managed by the MC
- * will be dropped from from the response. This may happen when a sensor table
+ * will be dropped from the response. This may happen when a sensor table
* update is in progress, and effectively means the set of usable sensors is
* the intersection between the sets of sensors known to the driver and the MC.
*
* provided should be treated as erroneous.
*
* Any handles which do not correspond to a sensor currently managed by the MC
- * will be dropped from from the response. This may happen when a sensor table
+ * will be dropped from the response. This may happen when a sensor table
* update is in progress, and effectively means the set of usable sensors is
* the intersection between the sets of sensors known to the driver and the MC.
*
rule = container_of(acts, struct efx_tc_flow_rule, acts);
if (rule->fallback)
fallback = rule->fallback;
- else /* fallback fallback: deliver to PF */
+ else /* fallback: deliver to PF */
fallback = &efx->tc->facts.pf;
rc = efx_mae_update_rule(efx, fallback->fw_id,
rule->fw_id);
pm_runtime_disable(&pdev->dev);
}
-/* standard register acces */
+/* standard register access */
static const struct smsc911x_ops standard_smsc911x_ops = {
.reg_read = __smsc911x_reg_read,
.reg_write = __smsc911x_reg_write,
* Description:
* This function validates the number of Unicast address entries supported
* by a particular Synopsys 10/100/1000 controller. The Synopsys controller
- * supports 1..32, 64, or 128 Unicast filter entries for it's Unicast filter
+ * supports 1..32, 64, or 128 Unicast filter entries for its Unicast filter
* logic. This function validates a valid, supported configuration is
* selected, and defaults to 1 Unicast address if an unsupported
* configuration is selected.
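/* Hedged sketch of the validation described above: accept 1..32, 64
 * or 128 Unicast entries, and fall back to a single address otherwise.
 */
static int validate_ucast_entries(int entries)
{
	if ((entries >= 1 && entries <= 32) ||
	    entries == 64 || entries == 128)
		return entries;
	return 1;	/* unsupported: default to 1 Unicast address */
}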
/* This looks hookey but the RX MAC reset we just did will
* undo some of the state we setup in niu_init_tx_mac() so we
* have to call it again. In particular, the RX MAC reset will
- * set the XMAC_MAX register back to it's default value.
+ * set the XMAC_MAX register back to its default value.
*/
niu_init_tx_mac(np);
niu_enable_tx_mac(np, 1);
struct niu_parent *parent;
u32 flags;
-#define NIU_FLAGS_HOTPLUG_PHY_PRESENT 0x02000000 /* Removeable PHY detected*/
-#define NIU_FLAGS_HOTPLUG_PHY 0x01000000 /* Removeable PHY */
+#define NIU_FLAGS_HOTPLUG_PHY_PRESENT 0x02000000 /* Removable PHY detected*/
+#define NIU_FLAGS_HOTPLUG_PHY 0x01000000 /* Removable PHY */
#define NIU_FLAGS_VPD_VALID 0x00800000 /* VPD has valid version */
#define NIU_FLAGS_MSIX 0x00400000 /* MSI-X in use */
#define NIU_FLAGS_MCAST 0x00200000 /* multicast filter enabled */
/* Auto negotiation. The scheme is very simple. We have a timer routine
* that keeps watching the auto negotiation process as it progresses.
 * The DP83840 is first told to start doing its thing, we set up the time
- * and place the timer state machine in it's initial state.
+ * and place the timer state machine in its initial state.
*
* Here the timer peeks at the DP83840 status registers at each click to see
* if the auto negotiation has completed, we assume here that the DP83840 PHY
#define GLOB_PSIZE_6144 0x10 /* 6k packet size */
#define GLOB_PSIZE_8192 0x11 /* 8k packet size */
-/* In MACE mode, there are four qe channels. Each channel has it's own
+/* In MACE mode, there are four qe channels. Each channel has its own
* status bits in the QEC status register. This macro picks out the
* ones you want.
*/
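/* Illustrative macro under an assumed layout (4 status bits per
 * channel): channel N's bits are extracted with a shift and a mask.
 */
#define QEC_CHAN_STATUS(sreg, chan)	(((sreg) >> ((chan) * 4)) & 0xf)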
* currently intrs are disabled (since we read ISR),
* and we have failed to register next poll.
	 * so we read the regs to trigger the chip
- * and allow further interupts. */
+ * and allow further interrupts. */
READ_REG(priv, regTXF_WPTR_0);
READ_REG(priv, regRXD_WPTR_0);
}