Documentation: document the "*" pattern as the last transport
map lookup. File: proto/transport.
+
+20090122
+
+ Bugfix: the data_directory was not automatically created!
+ File: conf/postfix-files.
+
+20090304
+
+ Cleanup: skip over suspended or throttled queues while
+ looking for delivery requests. File: *qmgr/qmgr_transport.c.
+
+20090305
+
+ Bugfix: in the "new queue manager", the _destination_rate_delay
+ code must postpone the post-delivery job scheduler updates
+ while a queue is suspended, and make them when the queue is
+ resumed; otherwise the scheduler could loop on blocked jobs.
+ Victor & Wietse. Files: qmgr/qmgr_entry.c, qmgr/qmgr_queue.c,
+ qmgr/qmgr_job.c.
+
+ Cleanup: report a "queue file write error" instead of passing
+ through bogus 2xx replies from proxy filters to SMTP clients.
+ File: smtpd/smtpd_proxy.c.
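
The substitution amounts to replacing any misleading 2xx/3xx proxy
reply with a transient local error before it reaches the SMTP client.
A rough standalone sketch, not part of the patch; the function name
and the exact 451 reply text are illustrative only:

    #include <stdio.h>

    /* Replace a misleading 2xx/3xx proxy reply with a transient local error. */
    static void sanitize_proxy_reply(char *reply, size_t len)
    {
        if (reply[0] == '2' || reply[0] == '3')
            snprintf(reply, len, "451 4.3.0 Error: queue file write error");
    }

    int main(void)
    {
        char    reply[100];

        snprintf(reply, sizeof(reply), "250 2.0.0 Ok"); /* bogus filter reply */
        sanitize_proxy_reply(reply, sizeof(reply));
        printf("%s\n", reply);                          /* 451 ... */
        return (0);
    }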
+
+20090310
+
+ Bugfix: Postfix used mumble_concurrency_failed_cohort_limit
+ instead of mumble_destination_concurrency_failed_cohort_limit
+ as documented. File: global/mail_params.h.
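
For context, Postfix derives the per-transport parameter name by
prepending the master.cf transport name to a fixed suffix, so a wrong
suffix string in mail_params.h means the documented name is never
looked up. A minimal sketch of that name construction, assuming a
hypothetical helper; only the suffix macro below comes from the patch:

    #include <stdio.h>

    /* Suffix as corrected in global/mail_params.h by this patch. */
    #define _CONC_COHORT_LIM "_destination_concurrency_failed_cohort_limit"

    /* Hypothetical helper: build "<transport><suffix>" into buf. */
    static const char *transport_param(char *buf, size_t len,
                                       const char *transport, const char *suffix)
    {
        snprintf(buf, len, "%s%s", transport, suffix);
        return (buf);
    }

    int main(void)
    {
        char    buf[200];

        /* Yields "smtp_destination_concurrency_failed_cohort_limit". */
        printf("%s\n", transport_param(buf, sizeof(buf), "smtp", _CONC_COHORT_LIM));
        return (0);
    }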
+
+20090419
+
+ Bugfix: don't re-enable SIGHUP if it is ignored in the
+ parent. Re-enabling it can cause random "Postfix integrity
+ check failed" errors at boot time (POSIX SIGHUP death),
+ so that Postfix does not start. The code is duplicated from
+ postdrop, to avoid repeating past mistakes. File:
+ postsuper/postsuper.c.
+
+ Robustness: don't re-enable SIGTERM if it is ignored in the
+ parent. Files: postsuper/postsuper.c, postdrop/postdrop.c.
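
Both of these changes rely on the same idiom: probe the disposition
inherited from the parent with signal(), and install a handler only
when the signal was still at its default, so a parent's SIG_IGN is
preserved. A minimal standalone sketch, with a stub interrupted()
handler standing in for the real cleanup code:

    #include <signal.h>
    #include <unistd.h>

    static void interrupted(int sig)
    {
        /* Clean up and terminate; details omitted in this sketch. */
        (void) sig;
        _exit(1);
    }

    int main(void)
    {
        /*
         * signal() returns the previous disposition. Setting SIG_IGN first
         * and checking the old value re-enables the handler only when the
         * signal was at its default; if the parent ignored it, it stays
         * ignored.
         */
        if (signal(SIGTERM, SIG_IGN) == SIG_DFL)
            signal(SIGTERM, interrupted);
        if (signal(SIGHUP, SIG_IGN) == SIG_DFL)
            signal(SIGHUP, interrupted);

        /* ... real work would follow here ... */
        return (0);
    }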
+
+20090428
+
+ Bugfix: don't disable MIME parsing when smtp_header_checks,
+ smtp_mime_header_checks, smtp_nested_header_checks or
+ smtp_body_checks are configured. Bug reported by Victor.
+ File: smtp/smtp_proto.c.
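
The corrected logic keeps the MIME engine enabled whenever any of
these checks is configured, and disables MIME parsing only when
nothing needs it (for example, when only smtp_generic_maps is set).
A rough restatement of the patched decision; the helper and the
MIME_OPT_* bit values below are stand-ins for the real mime(3)
interface:

    #include <stdio.h>

    /* Stand-in bit values; the real ones live in Postfix's MIME support. */
    #define MIME_OPT_DISABLE_MIME   (1 << 0)
    #define MIME_OPT_DOWNGRADE      (1 << 1)
    #define MIME_OPT_REPORT_NESTING (1 << 2)

    /* Header/body checks need a working MIME parser, so disable MIME
     * parsing only when no check is configured and no 8-bit downgrade
     * is required. */
    static int mime_options(int downgrading, int any_checks)
    {
        if (downgrading)
            return (MIME_OPT_DOWNGRADE | MIME_OPT_REPORT_NESTING);
        if (any_checks)
            return (0);                     /* full MIME parsing */
        return (MIME_OPT_DISABLE_MIME);     /* e.g. generic maps only */
    }

    int main(void)
    {
        printf("checks configured:  0x%x\n", mime_options(0, 1));
        printf("no checks, no 8bit: 0x%x\n", mime_options(0, 0));
        return (0);
    }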
# permissions, so that running "make install" fixes any glitches.
#
$config_directory:d:root:-:755:u
-$data_directory:d:$mail_owner:-:700:u
+$data_directory:d:$mail_owner:-:700:uc
$daemon_directory:d:root:-:755:u
$queue_directory:d:root:-:755:uc
$sample_directory:d:root:-:755:o
<p> A pseudo-cohort is the number of deliveries equal to a destination's
delivery concurrency. </p>
-<p> Use <a href="postconf.5.html#transport_destination_concurrency_positive_feedback"><i>transport</i>_destination_concurrency_negative_feedback</a>
+<p> Use <a href="postconf.5.html#transport_destination_concurrency_negative_feedback"><i>transport</i>_destination_concurrency_negative_feedback</a>
to specify a transport-specific override, where <i>transport</i>
is the <a href="master.5.html">master.cf</a>
name of the message delivery transport. </p>
<blockquote>
<pre>
/etc/postfix/<a href="master.5.html">master.cf</a>:
- mylmtp ... lmtp -o <a href="postconf.5.html#lmtp_lhloname">lmtp_lhlo_name</a>=foo.bar.com
+ mylmtp ... lmtp -o <a href="postconf.5.html#lmtp_lhlo_name">lmtp_lhlo_name</a>=foo.bar.com
</pre>
</blockquote>
s;\blmtp_tls_note_starttls_offer\b;<a href="postconf.5.html#lmtp_tls_note_starttls_offer">$&</a>;g;
s;\blmtp_sender_dependent_authentication\b;<a href="postconf.5.html#lmtp_sender_dependent_authentication">$&</a>;g;
s;\blmtp_sasl_path\b;<a href="postconf.5.html#lmtp_sasl_path">$&</a>;g;
- s;\blmtp_lhlo_name\b;<a href="postconf.5.html#lmtp_lhloname">$&</a>;g;
+ s;\blmtp_lhlo_name\b;<a href="postconf.5.html#lmtp_lhlo_name">$&</a>;g;
s;\blmtp_connect_timeout\b;<a href="postconf.5.html#lmtp_connect_timeout">$&</a>;g;
s;\blmtp_data_done_timeout\b;<a href="postconf.5.html#lmtp_data_done_timeout">$&</a>;g;
s;\blmtp_data_init_timeout\b;<a href="postconf.5.html#lmtp_data_init_timeout">$&</a>;g;
# Transport-dependent magical parameters.
s;(<i>transport</i>)(<b>)?(_destination_concurrency_failed_cohort_limit)\b;$2<a href="postconf.5.html#transport_destination_concurrency_failed_cohort_limit">$1$3</a>;g;
- s;(<i>transport</i>)(<b>)?(_destination_concurrency_negative_feedback)\b;$2<a href="postconf.5.html#transport_destination_concurrency_positive_feedback">$1$3</a>;g;
+ s;(<i>transport</i>)(<b>)?(_destination_concurrency_negative_feedback)\b;$2<a href="postconf.5.html#transport_destination_concurrency_negative_feedback">$1$3</a>;g;
s;(<i>transport</i>)(<b>)?(_destination_concurrency_positive_feedback)\b;$2<a href="postconf.5.html#transport_destination_concurrency_positive_feedback">$1$3</a>;g;
s;(<i>transport</i>)(<b>)?(_delivery_slot_cost)\b;$2<a href="postconf.5.html#transport_delivery_slot_cost">$1$3</a>;g;
s;(<i>transport</i>)(<b>)?(_delivery_slot_discount)\b;$2<a href="postconf.5.html#transport_delivery_slot_discount">$1$3</a>;g;
* Scheduler concurrency feedback algorithms.
*/
#define VAR_CONC_POS_FDBACK "default_destination_concurrency_positive_feedback"
-#define _CONC_POS_FDBACK "_concurrency_positive_feedback"
+#define _CONC_POS_FDBACK "_destination_concurrency_positive_feedback"
#define DEF_CONC_POS_FDBACK "1"
extern char *var_conc_pos_feedback;
#define VAR_CONC_NEG_FDBACK "default_destination_concurrency_negative_feedback"
-#define _CONC_NEG_FDBACK "_concurrency_negative_feedback"
+#define _CONC_NEG_FDBACK "_destination_concurrency_negative_feedback"
#define DEF_CONC_NEG_FDBACK "1"
extern char *var_conc_neg_feedback;
#define CONC_FDBACK_NAME_SQRT_WIN "sqrt_concurrency"
#define VAR_CONC_COHORT_LIM "default_destination_concurrency_failed_cohort_limit"
-#define _CONC_COHORT_LIM "_concurrency_failed_cohort_limit"
+#define _CONC_COHORT_LIM "_destination_concurrency_failed_cohort_limit"
#define DEF_CONC_COHORT_LIM 1
extern int var_conc_cohort_limit;
* Patches change both the patchlevel and the release date. Snapshots have no
* patchlevel; they change the release date only.
*/
-#define MAIL_RELEASE_DATE "20090103"
-#define MAIL_VERSION_NUMBER "2.5.6"
+#define MAIL_RELEASE_DATE "20090512"
+#define MAIL_VERSION_NUMBER "2.5.7"
#ifdef SNAPSHOT
# define MAIL_VERSION_DATE "-" MAIL_RELEASE_DATE
continue;
need = xport->pending + 1;
for (queue = xport->queue_list.next; queue; queue = queue->peers.next) {
+ if (QMGR_QUEUE_READY(queue) == 0)
+ continue;
if ((need -= MIN(queue->window - queue->busy_refcount,
queue->todo_refcount)) <= 0) {
QMGR_LIST_ROTATE(qmgr_transport_list, xport);
signal(SIGINT, postdrop_sig);
signal(SIGQUIT, postdrop_sig);
- signal(SIGTERM, postdrop_sig);
+ if (signal(SIGTERM, SIG_IGN) == SIG_DFL)
+ signal(SIGTERM, postdrop_sig);
if (signal(SIGHUP, SIG_IGN) == SIG_DFL)
signal(SIGHUP, postdrop_sig);
msg_cleanup(postdrop_cleanup);
/*
* This command requires root privileges. We therefore do not worry
* about hostile signals, and report problems via msg_warn().
+ *
+ * We use the in-kernel SIGINT handler address as an atomic variable to
+ * prevent nested interrupted() calls. For this reason, main() must
+ * configure interrupted() as SIGINT handler before other signal handlers
+ * are allowed to invoke interrupted(). See also similar code in
+ * postdrop.
*/
- if (signal(SIGHUP, SIG_IGN) != SIG_IGN) {
- (void) signal(SIGINT, SIG_IGN);
+ if (signal(SIGINT, SIG_IGN) != SIG_IGN) {
(void) signal(SIGQUIT, SIG_IGN);
(void) signal(SIGTERM, SIG_IGN);
+ (void) signal(SIGHUP, SIG_IGN);
if (inode_mismatch > 0 || inode_fixed > 0 || position_mismatch > 0)
msg_warn("OPERATION INCOMPLETE -- RERUN COMMAND TO FIX THE QUEUE FIRST");
if (sig)
*
* Set up signal handlers after permanently dropping super-user privileges,
* so that signal handlers will always run with the correct privileges.
+ *
+ * XXX Don't enable SIGHUP or SIGTERM if it was ignored by the parent.
+ *
+ * interrupted() uses the in-kernel SIGINT handler address as an atomic
+ * variable to prevent nested interrupted() calls. For this reason, the
+ * SIGINT handler must be configured before other signal handlers are
+ * allowed to invoke interrupted(). See also similar code in postdrop.
*/
- signal(SIGHUP, interrupted);
signal(SIGINT, interrupted);
signal(SIGQUIT, interrupted);
- signal(SIGTERM, interrupted);
+ if (signal(SIGTERM, SIG_IGN) == SIG_DFL)
+ signal(SIGTERM, interrupted);
+ if (signal(SIGHUP, SIG_IGN) == SIG_DFL)
+ signal(SIGHUP, interrupted);
msg_cleanup(fatal_warning);
/*
extern QMGR_ENTRY *qmgr_job_entry_select(QMGR_TRANSPORT *);
extern QMGR_PEER *qmgr_peer_select(QMGR_JOB *);
+extern void qmgr_job_blocker_update(QMGR_QUEUE *);
extern QMGR_JOB *qmgr_job_obtain(QMGR_MESSAGE *, QMGR_TRANSPORT *);
extern void qmgr_job_free(QMGR_JOB *);
}
/*
- * If the queue was blocking some of the jobs on the job list, check if
- * the concurrency limit has lifted. If there are still some pending
- * deliveries, give it a try and unmark all transport blockers at once.
- * The qmgr_job_entry_select() will do the rest. In either case make sure
- * the queue is not marked as a blocker anymore, with extra handling of
- * queues which were declared dead.
+ * We implement a rate-limited queue by emulating a slow delivery
+ * channel. We insert the artificial delays with qmgr_queue_suspend().
*
- * Note that changing the blocker status also affects the candidate cache.
- * Most of the cases would be automatically recognized by the current job
- * change, but we play safe and reset the cache explicitly below.
- *
- * Keeping the transport blocker tag odd is an easy way to make sure the tag
- * never matches jobs that are not explicitly marked as blockers.
+ * When a queue is suspended, we must postpone any job scheduling decisions
+ * until the queue is resumed. Otherwise, we make those decisions now.
+ * The job scheduling decisions are made by qmgr_job_blocker_update().
*/
- if (queue->blocker_tag == transport->blocker_tag) {
- if (queue->window > queue->busy_refcount && queue->todo.next != 0) {
- transport->blocker_tag += 2;
- transport->job_current = transport->job_list.next;
- transport->candidate_cache_current = 0;
- }
- if (queue->window > queue->busy_refcount || QMGR_QUEUE_THROTTLED(queue))
- queue->blocker_tag = 0;
+ if (which == QMGR_QUEUE_BUSY && transport->rate_delay > 0) {
+ if (queue->window > 1)
+ msg_panic("%s: queue %s/%s: window %d > 1 on rate-limited service",
+ myname, transport->name, queue->name, queue->window);
+ if (QMGR_QUEUE_THROTTLED(queue)) /* XXX */
+ qmgr_queue_unthrottle(queue);
+ if (QMGR_QUEUE_READY(queue))
+ qmgr_queue_suspend(queue, transport->rate_delay);
}
+ if (!QMGR_QUEUE_SUSPENDED(queue)
+ && queue->blocker_tag == transport->blocker_tag)
+ qmgr_job_blocker_update(queue);
/*
* When there are no more entries for this peer, discard the peer
if (which == QMGR_QUEUE_BUSY)
queue->last_done = event_time();
- /*
- * Suspend a rate-limited queue, so that mail trickles out.
- */
- if (which == QMGR_QUEUE_BUSY && transport->rate_delay > 0) {
- if (queue->window > 1)
- msg_panic("%s: queue %s/%s: window %d > 1 on rate-limited service",
- myname, transport->name, queue->name, queue->window);
- if (QMGR_QUEUE_THROTTLED(queue)) /* XXX */
- qmgr_queue_unthrottle(queue);
- if (QMGR_QUEUE_READY(queue))
- qmgr_queue_suspend(queue, transport->rate_delay);
- }
-
/*
* When the in-core queue for this site is empty and when this site is
* not dead or suspended, discard the in-core queue. When this site is
/*
/* QMGR_ENTRY *qmgr_job_entry_select(transport)
/* QMGR_TRANSPORT *transport;
+/*
+/* void qmgr_job_blocker_update(queue)
+/* QMGR_QUEUE *queue;
/* DESCRIPTION
/* These routines add/delete/manipulate per-transport jobs.
/* Each job corresponds to a specific transport and message.
/* If necessary, an attempt to read more recipients into core is made.
/* This can result in creation of more job, queue and entry structures.
/*
+/* qmgr_job_blocker_update() updates the status of blocked
+/* jobs after a decrease in the queue's concurrency level,
+/* after the queue is throttled, or after the queue is resumed
+/* from suspension.
+/*
/* qmgr_job_move_limits() takes care of proper distribution of the
/* per-transport recipients limit among the per-transport jobs.
/* Should be called whenever a job's recipient slot becomes available.
transport->job_current = 0;
return (0);
}
+
+/* qmgr_job_blocker_update - update "blocked job" status */
+
+void qmgr_job_blocker_update(QMGR_QUEUE *queue)
+{
+ QMGR_TRANSPORT *transport = queue->transport;
+
+ /*
+ * If the queue was blocking some of the jobs on the job list, check if
+ * the concurrency limit has lifted. If there are still some pending
+ * deliveries, give it a try and unmark all transport blockers at once.
+ * The qmgr_job_entry_select() will do the rest. In either case make sure
+ * the queue is not marked as a blocker anymore, with extra handling of
+ * queues which were declared dead.
+ *
+ * Note that changing the blocker status also affects the candidate cache.
+ * Most of the cases would be automatically recognized by the current job
+ * change, but we play safe and reset the cache explicitly below.
+ *
+ * Keeping the transport blocker tag odd is an easy way to make sure the tag
+ * never matches jobs that are not explicitly marked as blockers.
+ */
+ if (queue->blocker_tag == transport->blocker_tag) {
+ if (queue->window > queue->busy_refcount && queue->todo.next != 0) {
+ transport->blocker_tag += 2;
+ transport->job_current = transport->job_list.next;
+ transport->candidate_cache_current = 0;
+ }
+ if (queue->window > queue->busy_refcount || QMGR_QUEUE_THROTTLED(queue))
+ queue->blocker_tag = 0;
+ }
+}
+
/* "slow open" mode, and eliminates the "thundering herd" problem.
/*
/* qmgr_queue_suspend() suspends delivery for this destination
-/* briefly.
+/* briefly. This function invalidates any scheduling decisions
+/* that are based on the present queue's concurrency window.
+/* To compensate for work skipped by qmgr_entry_done(), the
+/* status of blocker jobs is re-evaluated after the queue is
+/* resumed.
/* DIAGNOSTICS
/* Panic: consistency check failure.
/* LICENSE
/*
* Every event handler that leaves a queue in the "ready" state should
* remove the queue when it is empty.
+ *
+ * XXX Do not omit the redundant test below. It is here to simplify code
+ * consistency checks. The check is trivially eliminated by the compiler
+ * optimizer. There is no need to sacrifice code clarity for the sake of
+ * performance.
+ *
+ * XXX Do not expose the blocker job logic here. Rate-limited queues are not
+ * a performance-critical feature. Here, too, there is no need to sacrifice
+ * code clarity for the sake of performance.
*/
if (QMGR_QUEUE_READY(queue) && queue->todo.next == 0 && queue->busy.next == 0)
qmgr_queue_done(queue);
+ else
+ qmgr_job_blocker_update(queue);
}
/* qmgr_queue_suspend - briefly suspend a destination */
continue;
need = xport->pending + 1;
for (queue = xport->queue_list.next; queue; queue = queue->peers.next) {
+ if (QMGR_QUEUE_READY(queue) == 0)
+ continue;
if ((need -= MIN(queue->window - queue->busy_refcount,
queue->todo_refcount)) <= 0) {
QMGR_LIST_ROTATE(qmgr_transport_list, xport, peers);
* XXX Don't downgrade just because generic_maps is turned
* on.
*/
- if (downgrading || smtp_generic_maps || smtp_header_checks
- || smtp_body_checks)
+#define SMTP_ANY_CHECKS (smtp_header_checks || smtp_body_checks)
+
+ if (downgrading || smtp_generic_maps || SMTP_ANY_CHECKS)
session->mime_state = mime_state_alloc(downgrading ?
MIME_OPT_DOWNGRADE
| MIME_OPT_REPORT_NESTING :
- MIME_OPT_DISABLE_MIME,
+ SMTP_ANY_CHECKS == 0 ?
+ MIME_OPT_DISABLE_MIME :
+ 0,
smtp_generic_maps
|| smtp_header_checks ?
smtp_header_rewrite :
/*
* Log a warning in case the proxy does not send the expected response.
* Silently accept any response when the client expressed no expectation.
+ *
+ * Don't pass through misleading 2xx replies: this confuses naive users and
+ * SMTP clients, and creates support problems.
*/
if (expect != SMTPD_PROX_WANT_ANY && expect != *STR(state->proxy_buffer)) {
va_start(ap, fmt);
smtpd_proxy_cmd_error(state, fmt, ap);
va_end(ap);
+ if (*STR(state->proxy_buffer) == SMTPD_PROX_WANT_OK
+ || *STR(state->proxy_buffer) == SMTPD_PROX_WANT_MORE) {
+ state->error_mask |= MAIL_ERROR_SOFTWARE;
+ state->err |= CLEANUP_STAT_PROXY;
+ detail = cleanup_stat_detail(CLEANUP_STAT_PROXY);
+ vstring_sprintf(state->proxy_buffer,
+ "%d %s Error: %s",
+ detail->smtp, detail->dsn, detail->text);
+ }
return (-1);
} else {
return (0);