git.ipfire.org Git - thirdparty/libvirt.git/commitdiff
numad: Convert node list to cpumap before setting affinity
author Osier Yang <jyang@redhat.com>
Mon, 16 Apr 2012 10:04:27 +0000 (18:04 +0800)
committer Daniel P. Berrange <berrange@redhat.com>
Mon, 16 Apr 2012 11:44:33 +0000 (12:44 +0100)
numad returns a list of NUMA nodes rather than a list of CPUs,
so this patch converts the node list to a cpumap before setting
the affinity. Otherwise the domain processes would be pinned
only to CPU[$numa_cell_num], causing a significant performance
loss.

Also, because numad balances the affinity dynamically,
reflecting the cpuset returned by numad back into the domain
XML doesn't make much sense and may only confuse users. The
better approach is not to reflect it back to the XML, and in
that case to ignore the cpuset when parsing the XML.

The code that updates the cpuset is removed by this patch
along the way; a follow-up patch will ignore a manually
specified "cpuset" when "placement" is "auto", and the
documentation will be updated too.
(cherry picked from commit ccf80e36301d538505c5c053cf369a61d4671831)
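For illustration, below is a minimal standalone sketch of the node-list to
cpumap expansion that the new loop in the diff performs. The host topology,
the nodemask values, and MAX_CPUS are hypothetical stand-ins for
driver->caps->host.numaCell[], the parsed numad nodeset, and maxcpu; this is
not the libvirt API, just the same logic in isolation.

/*
 * Sketch of converting a NUMA node mask into a CPU map, assuming the
 * host's CPUs are numbered consecutively per NUMA cell (the same
 * assumption the patched loop in qemuProcessInitCpuAffinity makes).
 */
#include <stdio.h>
#include <stdbool.h>

#define MAX_CPUS 16

int main(void)
{
    /* Hypothetical host: 4 NUMA cells with 4 CPUs each. */
    const int ncells = 4;
    const int cell_ncpus[] = { 4, 4, 4, 4 };

    /* numad advice parsed into a per-node mask, e.g. nodeset "1,3". */
    const bool nodemask[] = { false, true, false, true };

    bool cpumap[MAX_CPUS] = { false };

    /* Expand each selected node into the CPUs it contains. */
    int prev_total_ncpus = 0;
    for (int i = 0; i < ncells; i++) {
        if (nodemask[i]) {
            for (int j = prev_total_ncpus;
                 j < prev_total_ncpus + cell_ncpus[i] && j < MAX_CPUS;
                 j++)
                cpumap[j] = true;
        }
        prev_total_ncpus += cell_ncpus[i];
    }

    for (int j = 0; j < MAX_CPUS; j++)
        if (cpumap[j])
            printf("CPU %d selected\n", j);

    return 0;
}

With the example nodeset {1, 3} this marks CPUs 4-7 and 12-15, whereas
treating the node list directly as a cpumap would have pinned the domain
to CPUs 1 and 3 only, which is the misbehaviour the patch fixes.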

src/qemu/qemu_process.c

index 828b2a43473bdf622140fb7c4b37df2401aba2b4..0ec365d9f568a0b1f0c7c34d6c04edbe30a54371 100644 (file)
@@ -1820,38 +1820,45 @@ qemuProcessInitCpuAffinity(struct qemud_driver *driver,
     }
 
     if (vm->def->placement_mode == VIR_DOMAIN_CPU_PLACEMENT_MODE_AUTO) {
-        char *tmp_cpumask = NULL;
         char *nodeset = NULL;
+        char *nodemask = NULL;
 
         nodeset = qemuGetNumadAdvice(vm->def);
         if (!nodeset)
             goto cleanup;
 
-        if (VIR_ALLOC_N(tmp_cpumask, VIR_DOMAIN_CPUMASK_LEN) < 0) {
+        if (VIR_ALLOC_N(nodemask, VIR_DOMAIN_CPUMASK_LEN) < 0) {
             virReportOOMError();
             VIR_FREE(nodeset);
             goto cleanup;
         }
 
-        if (virDomainCpuSetParse(nodeset, 0, tmp_cpumask,
+        if (virDomainCpuSetParse(nodeset, 0, nodemask,
                                  VIR_DOMAIN_CPUMASK_LEN) < 0) {
-            VIR_FREE(tmp_cpumask);
+            VIR_FREE(nodemask);
             VIR_FREE(nodeset);
             goto cleanup;
         }
         VIR_FREE(nodeset);
 
-        for (i = 0; i < maxcpu && i < VIR_DOMAIN_CPUMASK_LEN; i++) {
-            if (tmp_cpumask[i])
-                VIR_USE_CPU(cpumap, i);
+        /* numad returns the NUMA node list, convert it to cpumap */
+        int prev_total_ncpus = 0;
+        for (i = 0; i < driver->caps->host.nnumaCell; i++) {
+            int j;
+            int cur_ncpus = driver->caps->host.numaCell[i]->ncpus;
+            if (nodemask[i]) {
+                for (j = prev_total_ncpus;
+                     j < cur_ncpus + prev_total_ncpus &&
+                     j < maxcpu &&
+                     j < VIR_DOMAIN_CPUMASK_LEN;
+                     j++) {
+                    VIR_USE_CPU(cpumap, j);
+                }
+            }
+            prev_total_ncpus += cur_ncpus;
         }
 
-        VIR_FREE(vm->def->cpumask);
-        vm->def->cpumask = tmp_cpumask;
-        if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0) {
-            VIR_WARN("Unable to save status on vm %s after state change",
-                     vm->def->name);
-        }
+        VIR_FREE(nodemask);
     } else {
         if (vm->def->cpumask) {
             /* XXX why don't we keep 'cpumask' in the libvirt cpumap