Introduce command 'virsh domstats --memory' for reporting memory BW
author    Wang Huaqiang <huaqiang.wang@intel.com>
          Thu, 2 Jan 2020 10:45:05 +0000 (18:45 +0800)
committer Daniel P. Berrangé <berrange@redhat.com>
          Mon, 6 Jan 2020 14:04:10 +0000 (14:04 +0000)
Introduce an option '--memory' for showing memory-related
information. The memory bandwidth information is listed as:

Domain: 'libvirt-vm'
 memory.bandwidth.monitor.count=4
 memory.bandwidth.monitor.0.name=vcpus_0-4
 memory.bandwidth.monitor.0.vcpus=0-4
 memory.bandwidth.monitor.0.node.count=2
 memory.bandwidth.monitor.0.node.0.id=0
 memory.bandwidth.monitor.0.node.0.bytes.total=10208067584
 memory.bandwidth.monitor.0.node.0.bytes.local=4807114752
 memory.bandwidth.monitor.0.node.1.id=1
 memory.bandwidth.monitor.0.node.1.bytes.total=8693735424
 memory.bandwidth.monitor.0.node.1.bytes.local=5850161152
 memory.bandwidth.monitor.1.name=vcpus_7
 memory.bandwidth.monitor.1.vcpus=7
 memory.bandwidth.monitor.1.node.count=2
 memory.bandwidth.monitor.1.node.0.id=0
 memory.bandwidth.monitor.1.node.0.bytes.total=853811200
 memory.bandwidth.monitor.1.node.0.bytes.local=290701312
 memory.bandwidth.monitor.1.node.1.id=1
 memory.bandwidth.monitor.1.node.1.bytes.total=406044672
 memory.bandwidth.monitor.1.node.1.bytes.local=229425152
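
The listing above comes from an invocation of the new flag along the
lines of:

   $ virsh domstats --memory libvirt-vm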

Signed-off-by: Wang Huaqiang <huaqiang.wang@intel.com>
docs/manpages/virsh.rst
include/libvirt/libvirt-domain.h
src/libvirt-domain.c
src/qemu/qemu_driver.c
tools/virsh-domain-monitor.c

index fea0527caf937fd20b390109bcdc451592f505e1..d0f9e15c380907f9553f50aa71fcc5b412b8f5e3 100644 (file)
@@ -2186,7 +2186,7 @@ domstats
 
    domstats [--raw] [--enforce] [--backing] [--nowait] [--state]
       [--cpu-total] [--balloon] [--vcpu] [--interface]
-      [--block] [--perf] [--iothread]
+      [--block] [--perf] [--iothread] [--memory]
       [[--list-active] [--list-inactive]
       [--list-persistent] [--list-transient] [--list-running]
        [--list-paused] [--list-shutoff] [--list-other]] | [domain ...]
@@ -2205,7 +2205,7 @@ behavior use the *--raw* flag.
 The individual statistics groups are selectable via specific flags. By
 default all supported statistics groups are returned. Supported
 statistics groups flags are: *--state*, *--cpu-total*, *--balloon*,
-*--vcpu*, *--interface*, *--block*, *--perf*, *--iothread*.
+*--vcpu*, *--interface*, *--block*, *--perf*, *--iothread*, *--memory*.
 
 Note that - depending on the hypervisor type and version or the domain state
 - not all of the following statistics may be returned.
@@ -2372,6 +2372,24 @@ not available for statistical purposes.
 * ``iothread.<id>.poll-shrink`` - polling time shrink value. A value of 0
   (zero) indicates shrink is managed by hypervisor.
 
+*--memory* returns:
+
+* ``memory.bandwidth.monitor.count`` - the number of memory bandwidth
+  monitors for this domain
+* ``memory.bandwidth.monitor.<num>.name`` - the name of monitor <num>
+* ``memory.bandwidth.monitor.<num>.vcpus`` - the vcpu list of monitor <num>
+* ``memory.bandwidth.monitor.<num>.node.count`` - the number of memory
+  controllers in monitor <num>
+* ``memory.bandwidth.monitor.<num>.node.<index>.id`` - the host-allocated
+  memory controller id for controller <index> of monitor <num>
+* ``memory.bandwidth.monitor.<num>.node.<index>.bytes.local`` - the
+  accumulated bytes consumed by @vcpus that pass through the memory
+  controller in the same processor that the scheduled host CPU belongs to
+* ``memory.bandwidth.monitor.<num>.node.<index>.bytes.total`` - the total
+  bytes consumed by @vcpus that pass through all memory controllers, either
+  local or remote
+
+
 Selecting a specific statistics groups doesn't guarantee that the
 daemon supports the selected group of stats. Flag *--enforce*
 forces the command to fail if the daemon doesn't support the
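
Since the group flags in the synopsis compose, the new group pairs
naturally with *--enforce*; for example, using the sample domain name
from the commit message:

   $ virsh domstats --memory --enforce libvirt-vm

With *--enforce* the command fails outright if the daemon cannot provide
the memory stats group, rather than silently omitting the
memory.bandwidth.* fields.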
index e60003978a8a9c57cde4de3ee18dd575842ef266..c1b9a9d1d0d4b450c75216989780e97f6db261a5 100644 (file)
@@ -2160,6 +2160,7 @@ typedef enum {
     VIR_DOMAIN_STATS_BLOCK = (1 << 5), /* return domain block info */
     VIR_DOMAIN_STATS_PERF = (1 << 6), /* return domain perf event info */
     VIR_DOMAIN_STATS_IOTHREAD = (1 << 7), /* return iothread poll info */
+    VIR_DOMAIN_STATS_MEMORY = (1 << 8), /* return domain memory info */
 } virDomainStatsTypes;
 
 typedef enum {
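
These virDomainStatsTypes values are bit flags, so a caller combines
groups with bitwise OR in a single stats request; a one-line
illustration using two of the flags shown above:

   unsigned int stats = VIR_DOMAIN_STATS_BLOCK | VIR_DOMAIN_STATS_MEMORY;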
index 793eceb39f69b0dbd00cd936b8b36f4a3f9467e2..eb66999f0751c35725d0bbf75bd30f5c8378b79d 100644 (file)
@@ -11640,6 +11640,27 @@ virConnectGetDomainCapabilities(virConnectPtr conn,
  *                                 hypervisor to choose how to shrink the
  *                                 polling time.
  *
+ * VIR_DOMAIN_STATS_MEMORY:
+ *     Return memory bandwidth statistics and usage information. The typed
+ *     parameter keys are in this format:
+ *
+ *     "memory.bandwidth.monitor.count" - the number of memory bandwidth
+ *                                        monitors for this domain
+ *     "memory.bandwidth.monitor.<num>.name" - the name of monitor <num>
+ *     "memory.bandwidth.monitor.<num>.vcpus" - the vcpu list of monitor <num>
+ *     "memory.bandwidth.monitor.<num>.node.count" - the number of memory
+ *                                            controllers in monitor <num>
+ *     "memory.bandwidth.monitor.<num>.node.<index>.id" - host-allocated memory
+ *                                                 controller id for controller
+ *                                                 <index> of monitor <num>
+ *     "memory.bandwidth.monitor.<num>.node.<index>.bytes.local" - the
+ *                       accumulated bytes consumed by @vcpus that pass
+ *                       through the memory controller in the same processor
+ *                       that the scheduled host CPU belongs to
+ *     "memory.bandwidth.monitor.<num>.node.<index>.bytes.total" - the total
+ *                       bytes consumed by @vcpus that pass through all
+ *                       memory controllers, either local or remote
+ *
  * Note that entire stats groups or individual stat fields may be missing from
  * the output in case they are not supported by the given hypervisor, are not
  * applicable for the current state of the guest domain, or their retrieval
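
A minimal consumer sketch against the public API documented above (not
part of this patch); it assumes a local qemu:///system URI with
read-only access and simply prints whichever typed parameters the
daemon returns:

   #include <stdio.h>
   #include <libvirt/libvirt.h>

   int main(void)
   {
       virConnectPtr conn = virConnectOpenReadOnly("qemu:///system");
       virDomainStatsRecordPtr *records = NULL;
       int nrecords;
       int i, j;

       if (!conn)
           return 1;

       /* Request only the new memory-bandwidth stats group. */
       nrecords = virConnectGetAllDomainStats(conn, VIR_DOMAIN_STATS_MEMORY,
                                              &records, 0);

       for (i = 0; i < nrecords; i++) {
           printf("Domain: '%s'\n", virDomainGetName(records[i]->dom));
           for (j = 0; j < records[i]->nparams; j++) {
               virTypedParameterPtr p = &records[i]->params[j];

               /* Names and vcpu lists are strings; monitor/node counts
                * are unsigned ints; byte counters are unsigned long longs. */
               switch (p->type) {
               case VIR_TYPED_PARAM_STRING:
                   printf("  %s=%s\n", p->field, p->value.s);
                   break;
               case VIR_TYPED_PARAM_UINT:
                   printf("  %s=%u\n", p->field, p->value.ui);
                   break;
               case VIR_TYPED_PARAM_ULLONG:
                   printf("  %s=%llu\n", p->field, p->value.ul);
                   break;
               default:
                   break;
               }
           }
       }

       if (records)
           virDomainStatsRecordListFree(records);
       virConnectClose(conn);
       return nrecords < 0 ? 1 : 0;
   }

Assuming pkg-config knows libvirt, this builds with
'gcc demo.c $(pkg-config --cflags --libs libvirt)'.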
index e92e8b5318708311c17a22eb8e8c7cdfae08ff89..ae54c0023941c21e1cf355e62ed1384edaf440c7 100644 (file)
@@ -20676,6 +20676,9 @@ qemuDomainGetResctrlMonData(virQEMUDriverPtr driver,
             features = caps->host.cache.monitor->features;
         break;
     case VIR_RESCTRL_MONITOR_TYPE_MEMBW:
+        if (caps->host.memBW.monitor)
+            features = caps->host.memBW.monitor->features;
+        break;
     case VIR_RESCTRL_MONITOR_TYPE_UNSUPPORT:
     case VIR_RESCTRL_MONITOR_TYPE_LAST:
         virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
@@ -20728,6 +20731,94 @@ qemuDomainGetResctrlMonData(virQEMUDriverPtr driver,
 }
 
 
+static int
+qemuDomainGetStatsMemoryBandwidth(virQEMUDriverPtr driver,
+                                  virDomainObjPtr dom,
+                                  virTypedParamListPtr params)
+{
+    virQEMUResctrlMonDataPtr *resdata = NULL;
+    char **features = NULL;
+    size_t nresdata = 0;
+    size_t i = 0;
+    size_t j = 0;
+    size_t k = 0;
+    int ret = -1;
+
+    if (!virDomainObjIsActive(dom))
+        return 0;
+
+    if (qemuDomainGetResctrlMonData(driver, dom, &resdata, &nresdata,
+                                    VIR_RESCTRL_MONITOR_TYPE_MEMBW) < 0)
+        goto cleanup;
+
+    if (nresdata == 0)
+        return 0;
+
+    if (virTypedParamListAddUInt(params, nresdata,
+                                 "memory.bandwidth.monitor.count") < 0)
+        goto cleanup;
+
+    for (i = 0; i < nresdata; i++) {
+        if (virTypedParamListAddString(params, resdata[i]->name,
+                                       "memory.bandwidth.monitor.%zu.name",
+                                       i) < 0)
+            goto cleanup;
+
+        if (virTypedParamListAddString(params, resdata[i]->vcpus,
+                                       "memory.bandwidth.monitor.%zu.vcpus",
+                                       i) < 0)
+            goto cleanup;
+
+        if (virTypedParamListAddUInt(params, resdata[i]->nstats,
+                                     "memory.bandwidth.monitor.%zu.node.count",
+                                     i) < 0)
+            goto cleanup;
+
+
+        for (j = 0; j < resdata[i]->nstats; j++) {
+            if (virTypedParamListAddUInt(params, resdata[i]->stats[j]->id,
+                                         "memory.bandwidth.monitor.%zu."
+                                         "node.%zu.id",
+                                         i, j) < 0)
+                goto cleanup;
+
+
+            features = resdata[i]->stats[j]->features;
+            for (k = 0; features[k]; k++) {
+                if (STREQ(features[k], "mbm_local_bytes")) {
+                    /* The accumulated data passing through the local memory
+                     * controller is recorded with a 64-bit counter. */
+                    if (virTypedParamListAddULLong(params,
+                                                   resdata[i]->stats[j]->vals[k],
+                                                   "memory.bandwidth.monitor."
+                                                   "%zu.node.%zu.bytes.local",
+                                                   i, j) < 0)
+                        goto cleanup;
+                }
+
+                if (STREQ(features[k], "mbm_total_bytes")) {
+                    /* The accumulated data passing through local and remote
+                     * memory controllers is recorded with a 64-bit counter. */
+                    if (virTypedParamListAddULLong(params,
+                                                   resdata[i]->stats[j]->vals[k],
+                                                   "memory.bandwidth.monitor."
+                                                   "%zu.node.%zu.bytes.total",
+                                                   i, j) < 0)
+                        goto cleanup;
+                }
+            }
+        }
+    }
+
+    ret = 0;
+ cleanup:
+    for (i = 0; i < nresdata; i++)
+        qemuDomainFreeResctrlMonData(resdata[i]);
+    VIR_FREE(resdata);
+    return ret;
+}
+
+
 static int
 qemuDomainGetStatsCpuCache(virQEMUDriverPtr driver,
                            virDomainObjPtr dom,
@@ -20836,6 +20927,17 @@ qemuDomainGetStatsCpu(virQEMUDriverPtr driver,
 }
 
 
+static int
+qemuDomainGetStatsMemory(virQEMUDriverPtr driver,
+                         virDomainObjPtr dom,
+                         virTypedParamListPtr params,
+                         unsigned int privflags G_GNUC_UNUSED)
+
+{
+    return qemuDomainGetStatsMemoryBandwidth(driver, dom, params);
+}
+
+
 static int
 qemuDomainGetStatsBalloon(virQEMUDriverPtr driver,
                           virDomainObjPtr dom,
@@ -21505,6 +21607,7 @@ static struct qemuDomainGetStatsWorker qemuDomainGetStatsWorkers[] = {
     { qemuDomainGetStatsBlock, VIR_DOMAIN_STATS_BLOCK, true },
     { qemuDomainGetStatsPerf, VIR_DOMAIN_STATS_PERF, false },
     { qemuDomainGetStatsIOThread, VIR_DOMAIN_STATS_IOTHREAD, true },
+    { qemuDomainGetStatsMemory, VIR_DOMAIN_STATS_MEMORY, false },
     { NULL, 0, false }
 };
 
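For context, an assumption about the underlying kernel interface worth
stating: the "mbm_local_bytes" and "mbm_total_bytes" feature names
matched in qemuDomainGetStatsMemoryBandwidth above are the Linux
resctrl memory-bandwidth-monitoring events, which a typical resctrl
mount exposes per monitoring group and per cache domain, e.g.:

   /sys/fs/resctrl/<group>/mon_data/mon_L3_00/mbm_local_bytes
   /sys/fs/resctrl/<group>/mon_data/mon_L3_00/mbm_total_bytes

Each mon_L3_<id> directory corresponds to one memory controller, which
is what surfaces as the node.<index> entries in the reported stats.
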
index 5639ea56f93f804cd7d6bd3e560d44920a38f53a..9e6bc99bf2e2166cb854f48ecd7e575b55d4f4f2 100644 (file)
@@ -2130,6 +2130,10 @@ static const vshCmdOptDef opts_domstats[] = {
      .type = VSH_OT_BOOL,
      .help = N_("report domain IOThread information"),
     },
+    {.name = "memory",
+     .type = VSH_OT_BOOL,
+     .help = N_("report domain memory bandwidth information"),
+    },
     {.name = "list-active",
      .type = VSH_OT_BOOL,
      .help = N_("list only active domains"),
@@ -2246,6 +2250,9 @@ cmdDomstats(vshControl *ctl, const vshCmd *cmd)
     if (vshCommandOptBool(cmd, "iothread"))
         stats |= VIR_DOMAIN_STATS_IOTHREAD;
 
+    if (vshCommandOptBool(cmd, "memory"))
+        stats |= VIR_DOMAIN_STATS_MEMORY;
+
     if (vshCommandOptBool(cmd, "list-active"))
         flags |= VIR_CONNECT_GET_ALL_DOMAINS_STATS_ACTIVE;