From: Yury Norov
Date: Tue, 28 Jan 2025 16:46:32 +0000 (-0500)
Subject: ibmvnic: simplify ibmvnic_set_queue_affinity()
X-Git-Tag: v6.15-rc1~222^2~16
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=2a402aa64c10251093273656891bf788cd95677f;p=thirdparty%2Flinux.git

ibmvnic: simplify ibmvnic_set_queue_affinity()

A loop based on cpumask_next_wrap() open-codes the dedicated macro
for_each_online_cpu_wrap(). Use the macro, as it improves readability
and simplifies maintenance. It also allows dropping the cpumask
handling code in the caller.

Signed-off-by: Yury Norov
Tested-by: Nick Child
---

diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index e95ae0d39948c..bef18ff69065f 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -234,11 +234,17 @@ static int ibmvnic_set_queue_affinity(struct ibmvnic_sub_crq_queue *queue,
 		(*stragglers)--;
 	}
 	/* atomic write is safer than writing bit by bit directly */
-	for (i = 0; i < stride; i++) {
-		cpumask_set_cpu(*cpu, mask);
-		*cpu = cpumask_next_wrap(*cpu, cpu_online_mask,
-					 nr_cpu_ids, false);
+	for_each_online_cpu_wrap(i, *cpu) {
+		if (!stride--) {
+			/* For the next queue we start from the first
+			 * unused CPU in this queue
+			 */
+			*cpu = i;
+			break;
+		}
+		cpumask_set_cpu(i, mask);
 	}
+
 	/* set queue affinity mask */
 	cpumask_copy(queue->affinity_mask, mask);
 	rc = irq_set_affinity_and_hint(queue->irq, queue->affinity_mask);
@@ -256,7 +262,7 @@ static void ibmvnic_set_affinity(struct ibmvnic_adapter *adapter)
 	int num_rxqs = adapter->num_active_rx_scrqs, i_rxqs = 0;
 	int num_txqs = adapter->num_active_tx_scrqs, i_txqs = 0;
 	int total_queues, stride, stragglers, i;
-	unsigned int num_cpu, cpu;
+	unsigned int num_cpu, cpu = 0;
 	bool is_rx_queue;
 	int rc = 0;

@@ -274,8 +280,6 @@ static void ibmvnic_set_affinity(struct ibmvnic_adapter *adapter)
 	stride = max_t(int, num_cpu / total_queues, 1);
 	/* number of leftover cpu's */
 	stragglers = num_cpu >= total_queues ? num_cpu % total_queues : 0;
-	/* next available cpu to assign irq to */
-	cpu = cpumask_next(-1, cpu_online_mask);

 	for (i = 0; i < total_queues; i++) {
 		is_rx_queue = false;
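
For reference, below is a minimal userspace sketch (not part of the commit, and not kernel code) of the distribution scheme the new loop implements: each queue receives `stride` CPUs in order, the iteration wraps around at the end of the online set, and the first unused CPU is carried forward as the start point for the next queue. The simplified 0..num_cpu-1 "online" set, the helper name assign_queue() and the straggler handling being omitted are all illustrative assumptions.

	#include <stdio.h>

	/* Hand `stride` CPUs to one queue, wrapping at num_cpu like the
	 * for_each_online_cpu_wrap() loop does over cpu_online_mask.
	 * Returns the first unused CPU: the start point for the next queue.
	 */
	static unsigned int assign_queue(unsigned int start, int stride,
					 unsigned int num_cpu, int queue)
	{
		unsigned int cpu = start;
		int n;

		for (n = 0; n < stride; n++) {
			printf("queue %d <- cpu %u\n", queue, cpu);
			cpu = (cpu + 1) % num_cpu;	/* wrap around */
		}
		return cpu;
	}

	int main(void)
	{
		unsigned int num_cpu = 6, cpu = 0;
		int total_queues = 4, q;
		int stride = num_cpu / total_queues ? num_cpu / total_queues : 1;

		/* mirrors the caller: the carried-forward `cpu` replaces the
		 * dropped cpumask_next(-1, cpu_online_mask) initialization
		 */
		for (q = 0; q < total_queues; q++)
			cpu = assign_queue(cpu, stride, num_cpu, q);
		return 0;
	}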