queue manager is unable to schedule back-to-back deliveries.
File: *qmgr/qmgr_entry.c.
+20040730
+
+ Hysteresis: turn on "opportunistic session caching" when
+	back-to-back deliveries happen, but don't turn it off
+ until both concurrent and back-to-back delivery ends.
+
+20040801
+
+ Workaround: disable session caching for Linux < 2.2 (does
+ not work) or Glibc < 2 (does not compile). Files:
+ util/sys_defs.h, util/unix_{recv,send}_fd.c.
+
+ Portability: h_errno is not an lvalue in the UnixWare 7.1
+ multi-threaded environment. Olivier PRENANT.
+
Open problems:
Low: update events.c so that 1-second timer requests do
extern char *var_smtp_cache_dest;
#define VAR_SMTP_CACHE_DEMAND "smtp_connection_cache_on_demand"
+#ifndef DEF_SMTP_CACHE_DEMAND
#define DEF_SMTP_CACHE_DEMAND 1
+#endif
extern bool var_smtp_cache_demand;
#define VAR_SMTP_CONN_TMOUT "smtp_connect_timeout"
* Patches change the patchlevel and the release date. Snapshots change the
* release date only.
*/
-#define MAIL_RELEASE_DATE "20040730"
+#define MAIL_RELEASE_DATE "20040801"
#define MAIL_VERSION_NUMBER "2.2"
#define VAR_MAIL_VERSION "mail_version"
* only 1) save a session upon completion, but also 2) reuse a cached
* session upon the next delivery request. In order to not miss out
* on 2), we have to make caching sticky or else we get silly
- * behavior when the in-memory queue drains. New connections must not
- * be made while cached connections aren't being reused.
+ * behavior when the in-memory queue drains. Specifically, new
+ * connections must not be made as long as cached connections exist.
*
- * Safety: don't enable opportunistic session caching until the queue
- * manager is able to schedule back-to-back deliveries.
+ * Safety: don't enable opportunistic session caching unless the queue
+ * manager is able to schedule concurrent or back-to-back deliveries
+ * (we need to recognize back-to-back deliveries for transports with
+ * concurrency 1).
+ *
+ * XXX It would be nice if we could say "try to reuse a cached
+ * connection, but don't bother saving it when you're done". As long
+ * as we can't, we must not turn off session caching too early.
+ */
+#define CONCURRENT_OR_BACK_TO_BACK_DELIVERY() \
+ (queue->busy_refcount > 1 || BACK_TO_BACK_DELIVERY())
+
+#define BACK_TO_BACK_DELIVERY() \
+ (queue->last_done + 1 >= event_time())
+
+ /*
+ * Turn on session caching after we get up to speed. Don't enable
+ * session caching just because we have concurrent deliveries. This
+ * prevents unnecessary session caching when we have a burst of mail
+ * <= the initial concurrency limit.
*/
if ((queue->dflags & DEL_REQ_FLAG_SCACHE) == 0) {
- if (queue->last_done + 1 >= event_time()) {
+ if (BACK_TO_BACK_DELIVERY()) {
if (msg_verbose)
msg_info("%s: allowing on-demand session caching for %s",
myname, queue->name);
queue->dflags |= DEL_REQ_FLAG_SCACHE;
}
- } else {
- if (queue->last_done + 1 < event_time()) {
+ }
+
+ /*
+ * Turn off session caching when concurrency drops and we're running
+     * out of steam. This is what prevents us from turning off session
+ * caching too early, and from making new connections while old ones
+ * are still cached.
+ */
+ else {
+ if (!CONCURRENT_OR_BACK_TO_BACK_DELIVERY()) {
if (msg_verbose)
msg_info("%s: disallowing on-demand session caching for %s",
myname, queue->name);
* only 1) save a session upon completion, but also 2) reuse a cached
* session upon the next delivery request. In order to not miss out
* on 2), we have to make caching sticky or else we get silly
- * behavior when the in-memory queue drains. New connections must not
- * be made while cached connections aren't being reused.
+ * behavior when the in-memory queue drains. Specifically, new
+ * connections must not be made as long as cached connections exist.
*
- * Safety: don't enable opportunistic session caching until the queue
- * manager is able to schedule back-to-back deliveries.
+ * Safety: don't enable opportunistic session caching unless the queue
+ * manager is able to schedule concurrent or back-to-back deliveries
+ * (we need to recognize back-to-back deliveries for transports with
+ * concurrency 1).
+ *
+ * XXX It would be nice if we could say "try to reuse a cached
+ * connection, but don't bother saving it when you're done". As long
+ * as we can't, we must not turn off session caching too early.
+ */
+#define CONCURRENT_OR_BACK_TO_BACK_DELIVERY() \
+ (queue->busy_refcount > 1 || BACK_TO_BACK_DELIVERY())
+
+#define BACK_TO_BACK_DELIVERY() \
+ (queue->last_done + 1 >= event_time())
+
+ /*
+ * Turn on session caching after we get up to speed. Don't enable
+ * session caching just because we have concurrent deliveries. This
+ * prevents unnecessary session caching when we have a burst of mail
+ * <= the initial concurrency limit.
*/
if ((queue->dflags & DEL_REQ_FLAG_SCACHE) == 0) {
- if (queue->last_done + 1 >= event_time()) {
+ if (BACK_TO_BACK_DELIVERY()) {
if (msg_verbose)
msg_info("%s: allowing on-demand session caching for %s",
myname, queue->name);
queue->dflags |= DEL_REQ_FLAG_SCACHE;
}
- } else {
- if (queue->last_done + 1 < event_time()) {
+ }
+
+ /*
+ * Turn off session caching when concurrency drops and we're running
+     * out of steam. This is what prevents us from turning off session
+ * caching too early, and from making new connections while old ones
+ * are still cached.
+ */
+ else {
+ if (!CONCURRENT_OR_BACK_TO_BACK_DELIVERY()) {
if (msg_verbose)
msg_info("%s: disallowing on-demand session caching for %s",
myname, queue->name);
FULL, &found, reply_name, reply_class,
def_acl)) != 0 || found)
CHECK_SERVER_RETURN(status);
- h_errno = 0; /* XXX */
+ SET_H_ERRNO(0);
if ((hp = gethostbyname((char *) server->data)) == 0) {
msg_warn("Unable to look up %s host %s for %s %s: %s",
dns_strtype(type), (char *) server->data,
#define STATVFS_IN_SYS_STATVFS_H
#define UNIX_DOMAIN_CONNECT_BLOCKS_FOR_ACCEPT
#define STRCASECMP_IN_STRINGS_H
+#define SET_H_ERRNO(err) (set_h_errno(err))
#endif
#ifdef UW21 /* UnixWare 2.1.x */
#define SOCKADDR_SIZE socklen_t
#define SOCKOPT_SIZE socklen_t
#endif
-#define CANT_WRITE_BEFORE_SENDING_FD
+#include <linux/version.h>
+#if !defined(KERNEL_VERSION) || (LINUX_VERSION_CODE < KERNEL_VERSION(2,2,0)) \
+ || (__GLIBC__ < 2)
+# define CANT_USE_SEND_RECV_MSG
+# define DEF_SMTP_CACHE_DEMAND 0
+#else
+# define CANT_WRITE_BEFORE_SENDING_FD
+#endif
#endif
#ifdef LINUX1
#define NATIVE_NEWALIAS_PATH "/usr/bin/newaliases"
#define NATIVE_COMMAND_DIR "/usr/sbin"
#define NATIVE_DAEMON_DIR "/usr/libexec/postfix"
+#define CANT_USE_SEND_RECV_MSG
+#define DEF_SMTP_CACHE_DEMAND 0
#endif
/*
int unix_recv_fd(int fd)
{
char *myname = "unix_recv_fd";
+
+ /*
+ * This code does not work with version <2.2 Linux kernels, and it does
+ * not compile with version <2 Linux libraries.
+ */
+#ifdef CANT_USE_SEND_RECV_MSG
+ msg_warn("%s: your system has no support for file descriptor passing",
+ myname);
+ return (-1);
+#else
struct msghdr msg;
int newfd;
struct iovec iov[1];
else
return (-1);
#endif
+#endif
}
#ifdef TEST
int unix_send_fd(int fd, int sendfd)
{
+
+ /*
+ * This code does not work with version <2.2 Linux kernels, and it does
+ * not compile with version <2 Linux libraries.
+ */
+#ifdef CANT_USE_SEND_RECV_MSG
+ char *myname = "unix_send_fd";
+
+ msg_warn("%s: your system has no support for file descriptor passing",
+ myname);
+ return (-1);
+#else
struct msghdr msg;
struct iovec iov[1];
msg.msg_iovlen = 1;
return (sendmsg(fd, &msg, 0));
+#endif
}
#ifdef TEST