From: Michael Brown
Date: Sun, 27 Apr 2014 21:35:48 +0000 (+0100)
Subject: [intel] Push new RX descriptors in batches
X-Git-Tag: v1.20.1~1215
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=b2c7b6a85e8078d5b6692156cb9850590a8ff6aa;p=thirdparty%2Fipxe.git

[intel] Push new RX descriptors in batches

Inside a virtual machine, writing the RX ring tail pointer may incur a
substantial overhead of processing inside the hypervisor.  Minimise this
overhead by writing the tail pointer once per batch of descriptors,
rather than once per descriptor.

Profiling under qemu-kvm (version 1.6.2) shows that this reduces the
amount of time taken to refill the RX descriptor ring by around 90%.

Signed-off-by: Michael Brown
---

diff --git a/src/drivers/net/intel.c b/src/drivers/net/intel.c
index 5abcdd7f8..6684bdbd9 100644
--- a/src/drivers/net/intel.c
+++ b/src/drivers/net/intel.c
@@ -456,19 +456,20 @@ void intel_refill_rx ( struct intel_nic *intel ) {
         unsigned int rx_idx;
         unsigned int rx_tail;
         physaddr_t address;
+        unsigned int refilled = 0;
+
         /* Refill ring */
         while ( ( intel->rx.prod - intel->rx.cons ) < INTEL_RX_FILL ) {
 
                 /* Allocate I/O buffer */
                 iobuf = alloc_iob ( INTEL_RX_MAX_LEN );
                 if ( ! iobuf ) {
                         /* Wait for next refill */
-                        return;
+                        break;
                 }
 
                 /* Get next receive descriptor */
                 rx_idx = ( intel->rx.prod++ % INTEL_NUM_RX_DESC );
-                rx_tail = ( intel->rx.prod % INTEL_NUM_RX_DESC );
                 rx = &intel->rx.desc[rx_idx];
 
                 /* Populate receive descriptor */
@@ -477,20 +478,24 @@ void intel_refill_rx ( struct intel_nic *intel ) {
                 rx->length = 0;
                 rx->status = 0;
                 rx->errors = 0;
-                wmb();
 
                 /* Record I/O buffer */
                 assert ( intel->rx_iobuf[rx_idx] == NULL );
                 intel->rx_iobuf[rx_idx] = iobuf;
 
-                /* Push descriptor to card */
-                profile_start ( &intel_vm_refill_profiler );
-                writel ( rx_tail, intel->regs + intel->rx.reg + INTEL_xDT );
-                profile_stop ( &intel_vm_refill_profiler );
-
                 DBGC2 ( intel, "INTEL %p RX %d is [%llx,%llx)\n", intel, rx_idx,
                         ( ( unsigned long long ) address ),
                         ( ( unsigned long long ) address + INTEL_RX_MAX_LEN ) );
+                refilled++;
+        }
+
+        /* Push descriptors to card, if applicable */
+        if ( refilled ) {
+                wmb();
+                rx_tail = ( intel->rx.prod % INTEL_NUM_RX_DESC );
+                profile_start ( &intel_vm_refill_profiler );
+                writel ( rx_tail, intel->regs + intel->rx.reg + INTEL_xDT );
+                profile_stop ( &intel_vm_refill_profiler );
+        }
 }
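
The change above is an instance of the usual batched-doorbell pattern: populate
any number of descriptors, then perform the (expensive) MMIO tail-pointer write
once for the whole batch.  A minimal standalone sketch of that pattern follows;
it is illustrative only, and every name in it (ring, ring_doorbell_write,
NUM_DESC, and so on) is hypothetical rather than part of the iPXE driver.

/* Sketch of a batched tail-pointer ("doorbell") write.  All names are
 * hypothetical; only the shape of the refill loop mirrors the change
 * made to intel_refill_rx() above. */

#include <stdio.h>

#define NUM_DESC 16

struct ring {
        unsigned int prod;       /* producer (driver) index */
        unsigned int cons;       /* consumer (device) index */
        unsigned int tail_reg;   /* stand-in for the MMIO tail register */
        unsigned int doorbells;  /* number of (expensive) tail writes */
};

/* Stand-in for writel() to the device tail register; inside a virtual
 * machine each such write may trap to the hypervisor, so fewer is better. */
static void ring_doorbell_write ( struct ring *ring, unsigned int tail ) {
        ring->tail_reg = tail;
        ring->doorbells++;
}

/* Refill the ring, writing the tail pointer once per batch */
static void ring_refill ( struct ring *ring ) {
        unsigned int refilled = 0;

        while ( ( ring->prod - ring->cons ) < NUM_DESC ) {
                /* ...allocate and populate one descriptor here... */
                ring->prod++;
                refilled++;
        }

        /* One doorbell write covers the whole batch */
        if ( refilled )
                ring_doorbell_write ( ring, ring->prod % NUM_DESC );
}

int main ( void ) {
        struct ring ring = { 0, 0, 0, 0 };

        ring_refill ( &ring );
        printf ( "refilled %u descriptors with %u doorbell write(s)\n",
                 ring.prod, ring.doorbells );
        return 0;
}

The per-descriptor work is unchanged; what the commit measures under qemu-kvm
is simply that the number of tail-pointer writes per refill drops to at most
one.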