--- /dev/null
+[func] fdupont
+	Added "pkt4-queue-full" and "pkt6-queue-full"
+	statistics which are incremented when an incoming packet
+	is dropped because the queue it was parked in is full.
+	(Gitlab #4157)
| | | network, faulty clients, or a bug |
| | | in the server. |
+----------------------------------------------------+----------------+------------------------------------+
+ | pkt4-queue-full | integer | Number of incoming packets that |
+ | | | were dropped when the queue they |
+ | | | were parked in became full. |
+ +----------------------------------------------------+----------------+------------------------------------+
| pkt4-receive-drop | integer | Number of incoming packets that |
| | | were dropped. The exact reason for |
| | | dropping packets is logged, but |
Dropped incoming packets can be counted in the ``pkt4-receive-drop`` and
a second counter detailing the drop cause:
+
- ``pkt4-service-disabled`` - DHCP service is disabled
+
- ``pkt4-parse-failed`` - packet parsing raised a fatal error
+- ``pkt4-queue-full`` - packet parked in a queue which became full
+
.. note::
The pool ID can be configured on each pool by explicitly setting the ``pool-id``
| | | relay agents, or a bug in the |
| | | server. |
+---------------------------------------------------+----------------+------------------------------------+
+ | pkt6-queue-full | integer | Number of incoming packets that |
+ | | | were dropped when the queue they |
+ | | | were parked in became full. |
+ +---------------------------------------------------+----------------+------------------------------------+
| pkt6-solicit-received | integer | Number of SOLICIT packets |
| | | received. This statistic is |
| | | expected to grow; its increase |
Dropped incoming packets can be counted in the ``pkt6-receive-drop`` and
a second counter detailing the drop cause:
+
- ``pkt6-service-disabled`` - DHCP service is disabled
+
- ``pkt6-parse-failed`` - packet parsing raised a fatal error
+- ``pkt6-queue-full`` - packet parked in a queue which became full
+
.. note::
The pool ID can be configured on each pool by explicitly setting the ``pool-id``
"2023-06-13 20:42:46.616351"
]
],
+ "pkt4-queue-full": [
+ [
+ 0,
+ "2023-06-13 20:42:46.616352"
+ ]
+ ],
"pkt4-unknown-received": [
[
0,
"2023-06-13 21:28:57.177747"
]
],
+ "pkt6-queue-full": [
+ [
+ 0,
+ "2023-06-13 21:28:57.177747"
+ ]
+ ],
"pkt6-solicit-received": [
[
0,
"2023-06-13 20:42:46.616351"
]
],
+ "pkt4-queue-full": [
+ [
+ 0,
+ "2023-06-13 20:42:46.616352"
+ ]
+ ],
"pkt4-unknown-received": [
[
0,
"2023-06-13 21:28:57.177747"
]
],
+ "pkt6-queue-full": [
+ [
+ 0,
+ "2023-06-13 21:28:57.177747"
+ ]
+ ],
"pkt6-solicit-received": [
[
0,
.arg(holder_id->query_->getHWAddrLabel())
.arg(holder_id->query_->toText())
.arg(holder_id->thread_);
+ stats::StatsMgr::instance().addValue("pkt4-queue-full",
+ static_cast<int64_t>(1));
stats::StatsMgr::instance().addValue("pkt4-receive-drop",
static_cast<int64_t>(1));
}
.arg(holder_id->query_->getHWAddrLabel())
.arg(holder_id->query_->toText())
.arg(holder_id->thread_);
+ stats::StatsMgr::instance().addValue("pkt4-queue-full",
+ static_cast<int64_t>(1));
stats::StatsMgr::instance().addValue("pkt4-receive-drop",
static_cast<int64_t>(1));
}
.arg(holder_hw->query_->getHWAddrLabel())
.arg(holder_hw->query_->toText())
.arg(holder_hw->thread_);
+ stats::StatsMgr::instance().addValue("pkt4-queue-full",
+ static_cast<int64_t>(1));
stats::StatsMgr::instance().addValue("pkt4-receive-drop",
static_cast<int64_t>(1));
}
.arg(holder_hw->query_->getHWAddrLabel())
.arg(holder_hw->query_->toText())
.arg(holder_hw->thread_);
+ stats::StatsMgr::instance().addValue("pkt4-queue-full",
+ static_cast<int64_t>(1));
stats::StatsMgr::instance().addValue("pkt4-receive-drop",
static_cast<int64_t>(1));
}
"pkt4-nak-sent",
"pkt4-service-disabled",
"pkt4-parse-failed",
+ "pkt4-queue-full",
"pkt4-receive-drop",
"v4-allocation-fail",
"v4-allocation-fail-shared-network",
DHCP4_HOOK_SUBNET4_SELECT_PARKING_LOT_FULL)
.arg(limit)
.arg(query->getLabel());
+ isc::stats::StatsMgr::instance().addValue("pkt4-queue-full",
+ static_cast<int64_t>(1));
+ isc::stats::StatsMgr::instance().addValue("pkt4-receive-drop",
+ static_cast<int64_t>(1));
return (ConstSubnet4Ptr());
}
DHCP4_HOOK_SUBNET4_SELECT_4O6_PARKING_LOT_FULL)
.arg(limit)
.arg(query->getLabel());
+ isc::stats::StatsMgr::instance().addValue("pkt4-queue-full",
+ static_cast<int64_t>(1));
+ isc::stats::StatsMgr::instance().addValue("pkt4-receive-drop",
+ static_cast<int64_t>(1));
return (ConstSubnet4Ptr());
}
LOG_DEBUG(packet4_logger, DBGLVL_PKT_HANDLING, parking_lot_full_msg)
.arg(limit)
.arg(query->getLabel());
+ isc::stats::StatsMgr::instance().addValue("pkt4-queue-full",
+ static_cast<int64_t>(1));
isc::stats::StatsMgr::instance().addValue("pkt4-receive-drop",
static_cast<int64_t>(1));
return (Pkt4Ptr());
/// @brief Constructor.
///
- /// Creates the pkt4-receive-drop statistic.
+ /// Creates the pkt4-queue-full and pkt4-receive-drop statistics.
ClientHandleTest() : called1_(false), called2_(false), called3_(false) {
MultiThreadingMgr::instance().apply(false, 0, 0);
+ StatsMgr::instance().setValue("pkt4-queue-full", static_cast<int64_t>(0));
StatsMgr::instance().setValue("pkt4-receive-drop", static_cast<int64_t>(0));
}
/// @brief Check statistics.
///
- /// @param bumped True if pkt4-receive-drop should have been bumped by one,
+ /// @param bumped True if statistics should have been bumped by one,
/// false otherwise.
void checkStat(bool bumped) {
- ObservationPtr obs = StatsMgr::instance().getObservation("pkt4-receive-drop");
- ASSERT_TRUE(obs);
+ ObservationPtr obs_qf =
+ StatsMgr::instance().getObservation("pkt4-queue-full");
+ ObservationPtr obs_rd =
+ StatsMgr::instance().getObservation("pkt4-receive-drop");
+ ASSERT_TRUE(obs_qf);
+ ASSERT_TRUE(obs_rd);
if (bumped) {
- EXPECT_EQ(1, obs->getInteger().first);
+ EXPECT_EQ(1, obs_qf->getInteger().first);
+ EXPECT_EQ(1, obs_rd->getInteger().first);
} else {
- EXPECT_EQ(0, obs->getInteger().first);
+ EXPECT_EQ(0, obs_qf->getInteger().first);
+ EXPECT_EQ(0, obs_rd->getInteger().first);
}
}
"pkt4-nak-sent",
"pkt4-service-disabled",
"pkt4-parse-failed",
+ "pkt4-queue-full",
"pkt4-receive-drop",
"v4-allocation-fail",
"v4-allocation-fail-shared-network",
"leases4_committed", leases4_committed_park_callout));
// Statistic should not show any drops.
+ EXPECT_EQ(0, getStatistic("pkt4-queue-full"));
EXPECT_EQ(0, getStatistic("pkt4-receive-drop"));
// Create a client and initiate a DORA cycle for it.
ASSERT_EQ(1, parking_lot->size());
// Statistic should show one drop.
+ EXPECT_EQ(1, getStatistic("pkt4-queue-full"));
EXPECT_EQ(1, getStatistic("pkt4-receive-drop"));
// Invoking poll should run the scheduled action only for
ASSERT_EQ(0, parking_lot->size());
// Statistic should still show one drop.
+ EXPECT_EQ(1, getStatistic("pkt4-queue-full"));
EXPECT_EQ(1, getStatistic("pkt4-receive-drop"));
}
"lease4_offer", lease4_offer_park_callout));
// Statistic should not show any drops.
+ EXPECT_EQ(0, getStatistic("pkt4-queue-full"));
EXPECT_EQ(0, getStatistic("pkt4-receive-drop"));
// Create a client and initiate a DORA cycle for it.
ASSERT_EQ(1, parking_lot->size());
// Statistic should show one drop.
+ EXPECT_EQ(1, getStatistic("pkt4-queue-full"));
EXPECT_EQ(1, getStatistic("pkt4-receive-drop"));
// Invoking poll should run the scheduled action only for
ASSERT_EQ(0, parking_lot->size());
// Statistic should still show one drop.
+ EXPECT_EQ(1, getStatistic("pkt4-queue-full"));
EXPECT_EQ(1, getStatistic("pkt4-receive-drop"));
}
"pkt4-nak-sent",
"pkt4-service-disabled",
"pkt4-parse-failed",
+ "pkt4-queue-full",
"pkt4-receive-drop",
"v4-allocation-fail",
"v4-allocation-fail-shared-network",
.arg(holder->query_->makeLabel(holder->query_->getClientId(), nullptr))
.arg(holder->query_->toText())
.arg(holder->thread_);
+ stats::StatsMgr::instance().addValue("pkt6-queue-full",
+ static_cast<int64_t>(1));
stats::StatsMgr::instance().addValue("pkt6-receive-drop",
static_cast<int64_t>(1));
}
.arg(holder->query_->makeLabel(holder->query_->getClientId(), nullptr))
.arg(holder->query_->toText())
.arg(holder->thread_);
+ stats::StatsMgr::instance().addValue("pkt6-queue-full",
+ static_cast<int64_t>(1));
stats::StatsMgr::instance().addValue("pkt6-receive-drop",
static_cast<int64_t>(1));
}
"pkt6-addr-reg-reply-sent",
"pkt6-service-disabled",
"pkt6-parse-failed",
+ "pkt6-queue-full",
"pkt6-receive-drop",
"v6-allocation-fail",
"v6-allocation-fail-shared-network",
DHCP6_HOOK_LEASES6_PARKING_LOT_FULL)
.arg(parked_packet_limit)
.arg(query->getLabel());
+ isc::stats::StatsMgr::instance().addValue("pkt6-queue-full",
+ static_cast<int64_t>(1));
isc::stats::StatsMgr::instance().addValue("pkt6-receive-drop",
static_cast<int64_t>(1));
rsp.reset();
DHCP4_HOOK_SUBNET6_SELECT_PARKING_LOT_FULL)
.arg(limit)
.arg(question->getLabel());
+ isc::stats::StatsMgr::instance().addValue("pkt6-queue-full",
+ static_cast<int64_t>(1));
+ isc::stats::StatsMgr::instance().addValue("pkt6-receive-drop",
+ static_cast<int64_t>(1));
return (ConstSubnet6Ptr());
}
/// @brief Constructor.
///
- /// Creates the pkt6-receive-drop statistic.
+ /// Creates the pkt6-queue-full and pkt6-receive-drop statistics.
ClientHandleTest() : called1_(false), called2_(false), called3_(false) {
MultiThreadingMgr::instance().apply(false, 0, 0);
+ StatsMgr::instance().setValue("pkt6-queue-full", static_cast<int64_t>(0));
StatsMgr::instance().setValue("pkt6-receive-drop", static_cast<int64_t>(0));
}
/// @brief Check statistics.
///
- /// @param bumped True if pkt6-receive-drop should have been bumped by one,
+ /// @param bumped True if statistics should have been bumped by one,
/// false otherwise.
void checkStat(bool bumped) {
- ObservationPtr obs = StatsMgr::instance().getObservation("pkt6-receive-drop");
- ASSERT_TRUE(obs);
+ ObservationPtr obs_qf =
+ StatsMgr::instance().getObservation("pkt6-queue-full");
+ ObservationPtr obs_rd =
+ StatsMgr::instance().getObservation("pkt6-receive-drop");
+ ASSERT_TRUE(obs_qf);
+ ASSERT_TRUE(obs_rd);
if (bumped) {
- EXPECT_EQ(1, obs->getInteger().first);
+ EXPECT_EQ(1, obs_qf->getInteger().first);
+ EXPECT_EQ(1, obs_rd->getInteger().first);
} else {
- EXPECT_EQ(0, obs->getInteger().first);
+ EXPECT_EQ(0, obs_qf->getInteger().first);
+ EXPECT_EQ(0, obs_rd->getInteger().first);
}
}
"pkt6-addr-reg-reply-sent",
"pkt6-service-disabled",
"pkt6-parse-failed",
+ "pkt6-queue-full",
"pkt6-receive-drop",
"v6-allocation-fail",
"v6-allocation-fail-shared-network",
ASSERT_EQ(0, parking_lot->size());
// Statistic should not show any drops.
+ EXPECT_EQ(0, getStatistic("pkt6-queue-full"));
EXPECT_EQ(0, getStatistic("pkt6-receive-drop"));
// This callout uses provided IO service object to post a function
// Verify we have one parked packet and no drops.
ASSERT_EQ(1, parking_lot->size());
+ EXPECT_EQ(0, getStatistic("pkt6-queue-full"));
EXPECT_EQ(0, getStatistic("pkt6-receive-drop"));
// Reset all indicators because we'll be now creating a second client.
// Verify we have one parked packet and one drop.
ASSERT_EQ(1, parking_lot->size());
+ EXPECT_EQ(1, getStatistic("pkt6-queue-full"));
EXPECT_EQ(1, getStatistic("pkt6-receive-drop"));
// There should now be one action scheduled on our IO service
// Verify we have no parked packets and one drop.
ASSERT_EQ(0, parking_lot->size());
+ EXPECT_EQ(1, getStatistic("pkt6-queue-full"));
EXPECT_EQ(1, getStatistic("pkt6-receive-drop"));
// Should not anything to receive for client2.
// Verify we again have one parked packet and one drop.
ASSERT_EQ(1, parking_lot->size());
+ EXPECT_EQ(1, getStatistic("pkt6-queue-full"));
EXPECT_EQ(1, getStatistic("pkt6-receive-drop"));
// There should now be one action scheduled on our IO service
// Verify we no parked packets and one drop.
ASSERT_EQ(0, parking_lot->size());
+ EXPECT_EQ(1, getStatistic("pkt6-queue-full"));
EXPECT_EQ(1, getStatistic("pkt6-receive-drop"));
}
"pkt6-addr-reg-reply-sent",
"pkt6-service-disabled",
"pkt6-parse-failed",
+ "pkt6-queue-full",
"pkt6-receive-drop",
"v6-allocation-fail",
"v6-allocation-fail-shared-network",