20040729
Feature: enable SMTP session caching temporarily while a
- site has a high volume of mail in the active queue.
+ destination is receiving mail in back-to-back deliveries.
Parameter: smtp_connection_cache_on_demand (default:
yes). Files: smtp/smtp_connect.c, *qmgr/qmgr_entry.c,
*qmgr/qmgr_queue.c, *qmgr/qmgr_deliver.c.
addresses for (trivial-rewrite) stress testing. Victor
Duchovni, Morgan Stanley. File: smtpstone/smtp-source.c.
+20040730
+
+ Safety: disallow "opportunistic session caching" when the
+ queue manager is unable to schedule back-to-back deliveries.
+ File: *qmgr/qmgr_entry.c.
+
Open problems:
Low: update events.c so that 1-second timer requests do
* Patches change the patchlevel and the release date. Snapshots change the
* release date only.
*/
-#define MAIL_RELEASE_DATE "20040729"
+#define MAIL_RELEASE_DATE "20040730"
#define MAIL_VERSION_NUMBER "2.2"
#define VAR_MAIL_VERSION "mail_version"
struct QMGR_QUEUE {
int dflags; /* delivery request options */
+ time_t last_done; /* last delivery completion */
char *name; /* domain name or address */
char *nexthop; /* domain name */
int todo_refcount; /* queue entries (todo list) */
QMGR_ENTRY *qmgr_entry_select(QMGR_QUEUE *queue)
{
+ char *myname = "qmgr_entry_select";
QMGR_ENTRY *entry;
if ((entry = queue->todo.prev) != 0) {
queue->todo_refcount--;
QMGR_LIST_APPEND(queue->busy, entry);
queue->busy_refcount++;
+
+ /*
+ * With opportunistic session caching, the delivery agent must not
+ * only 1) save a session upon completion, but also 2) reuse a cached
+ * session upon the next delivery request. To avoid missing out on
+ * 2), caching has to be made sticky; otherwise, when the in-memory
+ * queue drains, we get silly behavior: new connections are opened
+ * while cached connections sit unused.
+ *
+ * Safety: don't enable opportunistic session caching until the queue
+ * manager is able to schedule back-to-back deliveries.
+ */
+ if ((queue->dflags & DEL_REQ_FLAG_SCACHE) == 0) {
+ if (queue->last_done + 1 >= event_time()) {
+ if (msg_verbose)
+ msg_info("%s: allowing on-demand session caching for %s",
+ myname, queue->name);
+ queue->dflags |= DEL_REQ_FLAG_SCACHE;
+ }
+ } else {
+ if (queue->last_done + 1 < event_time()) {
+ if (msg_verbose)
+ msg_info("%s: disallowing on-demand session caching for %s",
+ myname, queue->name);
+ queue->dflags &= ~DEL_REQ_FLAG_SCACHE;
+ }
+ }
}
return (entry);
}
myfree((char *) entry);
+ /*
+ * Maintain back-to-back delivery status.
+ */
+ queue->last_done = event_time();
+
/*
* When the in-core queue for this site is empty and when this site is
* not dead, discard the in-core queue. When this site is dead, but the
QMGR_ENTRY *qmgr_entry_create(QMGR_QUEUE *queue, QMGR_MESSAGE *message)
{
- char *myname = "qmgr_entry_create";
QMGR_ENTRY *entry;
/*
QMGR_LIST_APPEND(queue->todo, entry);
queue->todo_refcount++;
- /*
- * With opportunistic session caching, the delivery agent must not only
- * 1) save a session upon completion, but also 2) reuse a cached session
- * upon the next delivery request. In order to not miss out on 2), we
- * have to make caching sticky or else we get silly behavior when the
- * in-memory queue drains. New connections must not be made while cached
- * connections aren't being reused.
- */
- if ((queue->dflags & DEL_REQ_FLAG_SCACHE) == 0
- && queue->window < queue->todo_refcount + queue->busy_refcount) {
- if (msg_verbose)
- msg_info("%s: passing on-demand session caching threshold for %s",
- myname, queue->name);
- queue->dflags |= DEL_REQ_FLAG_SCACHE;
- }
-
/*
* Warn if a destination is falling behind while the active queue
* contains a non-trivial amount of single-recipient email. When a
queue = (QMGR_QUEUE *) mymalloc(sizeof(QMGR_QUEUE));
qmgr_queue_count++;
queue->dflags = 0;
+ queue->last_done = 0;
queue->name = mystrdup(name);
queue->nexthop = mystrdup(nexthop);
queue->todo_refcount = 0;
struct QMGR_QUEUE {
int dflags; /* delivery request options */
+ time_t last_done; /* last delivery completion */
char *name; /* domain name or address */
char *nexthop; /* domain name */
int todo_refcount; /* queue entries (todo list) */
QMGR_ENTRY *qmgr_entry_select(QMGR_PEER *peer)
{
+ char *myname = "qmgr_entry_select";
QMGR_ENTRY *entry;
QMGR_QUEUE *queue;
queue->busy_refcount++;
QMGR_LIST_UNLINK(peer->entry_list, QMGR_ENTRY *, entry, peer_peers);
peer->job->selected_entries++;
+
+ /*
+ * With opportunistic session caching, the delivery agent must not
+ * only 1) save a session upon completion, but also 2) reuse a cached
+ * session upon the next delivery request. To avoid missing out on
+ * 2), caching has to be made sticky; otherwise, when the in-memory
+ * queue drains, we get silly behavior: new connections are opened
+ * while cached connections sit unused.
+ *
+ * Safety: don't enable opportunistic session caching until the queue
+ * manager is able to schedule back-to-back deliveries.
+ */
+ if ((queue->dflags & DEL_REQ_FLAG_SCACHE) == 0) {
+ if (queue->last_done + 1 >= event_time()) {
+ if (msg_verbose)
+ msg_info("%s: allowing on-demand session caching for %s",
+ myname, queue->name);
+ queue->dflags |= DEL_REQ_FLAG_SCACHE;
+ }
+ } else {
+ if (queue->last_done + 1 < event_time()) {
+ if (msg_verbose)
+ msg_info("%s: disallowing on-demand session caching for %s",
+ myname, queue->name);
+ queue->dflags &= ~DEL_REQ_FLAG_SCACHE;
+ }
+ }
}
return (entry);
}
if (peer->refcount == 0)
qmgr_peer_free(peer);
+ /*
+ * Maintain back-to-back delivery status.
+ */
+ queue->last_done = event_time();
+
/*
* When the in-core queue for this site is empty and when this site is
* not dead, discard the in-core queue. When this site is dead, but the
QMGR_ENTRY *qmgr_entry_create(QMGR_PEER *peer, QMGR_MESSAGE *message)
{
- char *myname = "qmgr_entry_create";
QMGR_ENTRY *entry;
QMGR_QUEUE *queue = peer->queue;
QMGR_LIST_APPEND(queue->todo, entry, queue_peers);
queue->todo_refcount++;
- /*
- * With opportunistic session caching, the delivery agent must not only
- * 1) save a session upon completion, but also 2) reuse a cached session
- * upon the next delivery request. In order to not miss out on 2), we
- * have to make caching sticky or else we get silly behavior when the
- * in-memory queue drains. New connections must not be made while cached
- * connections aren't being reused.
- */
- if ((queue->dflags & DEL_REQ_FLAG_SCACHE) == 0
- && queue->window < queue->todo_refcount + queue->busy_refcount) {
- if (msg_verbose)
- msg_info("%s: passing on-demand session caching threshold for %s",
- myname, queue->name);
- queue->dflags |= DEL_REQ_FLAG_SCACHE;
- }
-
/*
* Warn if a destination is falling behind while the active queue
* contains a non-trivial amount of single-recipient email. When a
queue = (QMGR_QUEUE *) mymalloc(sizeof(QMGR_QUEUE));
qmgr_queue_count++;
queue->dflags = 0;
+ queue->last_done = 0;
queue->name = mystrdup(name);
queue->nexthop = mystrdup(nexthop);
queue->todo_refcount = 0;