gboolean
handle_statfile_normalizer (struct config_file *cfg, struct rspamd_xml_userdata *ctx, GHashTable *attrs, gchar *data, gpointer user_data, gpointer dest_struct, int offset)
{
- struct statfile *st = ctx->section_pointer;
-
- if (!parse_normalizer (cfg, st, data)) {
- msg_err ("cannot parse normalizer string: %s", data);
- return FALSE;
- }
-
+ msg_info ("normalizer option is now not available as rspamd always use internal normalizer for winnow (hyperbolic tanhent)");
return TRUE;
}
#ifdef WITH_LUA
max = call_classifier_post_callbacks (ctx->cfg, task, max);
#endif
- if (st->normalizer != NULL) {
- max = st->normalizer (task->cfg, max, st->normalizer_data);
- }
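+ /* Squash the raw classifier result through the hyperbolic tangent so the reported weight stays bounded */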
+#ifdef HAVE_TANHL
+ max = tanhl (max);
+#else
+ /*
+ * Some implementations of libm do not support tanhl, so fall back to
+ * tanh
+ */
+ max = tanh ((double) max);
+#endif
sumbuf = memory_pool_alloc (task->task_pool, 32);
rspamd_snprintf (sumbuf, 32, "%.2F", max);
cur = g_list_prepend (NULL, sumbuf);
end:
if (sum) {
- *sum = (double)max;
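+ /* Apply the same tanh normalization to the sum returned to the caller */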
+#ifdef HAVE_TANHL
+ *sum = (double)tanhl (max);
+#else
+ /*
+ * Some implementations of libm do not support tanhl, so fall back to
+ * tanh
+ */
+ *sum = tanh ((double) max);
+#endif
}
return TRUE;
}
session->worker->srv->stat->messages_learned++;
maybe_write_binlog (session->learn_classifier, st, statfile, tokens);
- if (st->normalizer != NULL) {
- sum = st->normalizer (session->cfg, sum, st->normalizer_data);
- }
msg_info ("learn success for message <%s>, for statfile: %s, sum weight: %.2f",
task->message_id, session->learn_symbol, sum);
free_task (task, FALSE);