* this pointer. We know we have reached the end when this
* pointer points back to the head of the streams list.
*/
+ SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
LIST_INIT(&appctx->ctx.map.bref.users);
appctx->ctx.map.bref.ref = appctx->ctx.map.ref->head.n;
+ SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
appctx->st2 = STAT_ST_LIST;
/* fall through */
case STAT_ST_LIST:
+
+ SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
+
if (!LIST_ISEMPTY(&appctx->ctx.map.bref.users)) {
LIST_DEL(&appctx->ctx.map.bref.users);
LIST_INIT(&appctx->ctx.map.bref.users);
/* let's try again later from this stream. We add ourselves into
* this stream's users so that it can remove us upon termination.
*/
- si_applet_cant_put(si);
LIST_ADDQ(&elt->back_refs, &appctx->ctx.map.bref.users);
+ SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
+ si_applet_cant_put(si);
return 0;
}
/* get next list entry and check the end of the list */
appctx->ctx.map.bref.ref = elt->list.n;
}
-
+ SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
appctx->st2 = STAT_ST_FIN;
	/* fall through */
case STAT_ST_LIST:
+ SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
/* for each lookup type */
while (appctx->ctx.map.expr) {
/* initialise chunk to build new message */
sample.flags = SMP_F_CONST;
sample.data.u.str.len = appctx->ctx.map.chunk.len;
sample.data.u.str.str = appctx->ctx.map.chunk.str;
+
if (appctx->ctx.map.expr->pat_head->match &&
sample_convert(&sample, appctx->ctx.map.expr->pat_head->expect_type))
pat = appctx->ctx.map.expr->pat_head->match(&sample, appctx->ctx.map.expr, 1);
/* let's try again later from this stream. We add ourselves into
* this stream's users so that it can remove us upon termination.
*/
+ SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
si_applet_cant_put(si);
return 0;
}
appctx->ctx.map.expr = pat_expr_get_next(appctx->ctx.map.expr,
&appctx->ctx.map.ref->pat);
}
-
+ SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
appctx->st2 = STAT_ST_FIN;
/* fall through */
/* CLI release handler for "show map"/"show acl". If the dump was interrupted
 * while walking the entry list (st2 == STAT_ST_LIST), this applet registered
 * itself on a stream's back-reference list (see the LIST state code above);
 * detach it here so nothing later walks a link into a freed applet context.
 * The '+' lines are this patch's addition: the back-reference list is now
 * protected by the pattern reference spinlock.
 */
static void cli_release_show_map(struct appctx *appctx)
{
	if (appctx->st2 == STAT_ST_LIST) {
+		SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
		if (!LIST_ISEMPTY(&appctx->ctx.map.bref.users))
			LIST_DEL(&appctx->ctx.map.bref.users);
+		SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
	}
}
/* Try to delete the entry. */
err = NULL;
+ SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
if (!pat_ref_set_by_id(appctx->ctx.map.ref, ref, args[4], &err)) {
+ SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
if (err)
memprintf(&err, "%s.\n", err);
appctx->ctx.cli.err = err;
appctx->st0 = CLI_ST_PRINT_FREE;
return 1;
}
+ SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
}
else {
/* Else, use the entry identifier as pattern
* string, and update the value.
*/
err = NULL;
+ SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
if (!pat_ref_set(appctx->ctx.map.ref, args[3], args[4], &err)) {
+ SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
if (err)
memprintf(&err, "%s.\n", err);
appctx->ctx.cli.err = err;
appctx->st0 = CLI_ST_PRINT_FREE;
return 1;
}
+ SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
}
/* The set is done, send message. */
/* Add value. */
err = NULL;
+ SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
if (appctx->ctx.map.display_flags == PAT_REF_MAP)
ret = pat_ref_add(appctx->ctx.map.ref, args[3], args[4], &err);
else
ret = pat_ref_add(appctx->ctx.map.ref, args[3], NULL, &err);
+ SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
if (!ret) {
if (err)
memprintf(&err, "%s.\n", err);
}
/* Try to delete the entry. */
+ SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
if (!pat_ref_delete_by_id(appctx->ctx.map.ref, ref)) {
+ SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
/* The entry is not found, send message. */
appctx->ctx.cli.severity = LOG_ERR;
appctx->ctx.cli.msg = "Key not found.\n";
appctx->st0 = CLI_ST_PRINT;
return 1;
}
+ SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
}
else {
/* Else, use the entry identifier as pattern
* string and try to delete the entry.
*/
+ SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
if (!pat_ref_delete(appctx->ctx.map.ref, args[3])) {
+ SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
/* The entry is not found, send message. */
appctx->ctx.cli.severity = LOG_ERR;
appctx->ctx.cli.msg = "Key not found.\n";
appctx->st0 = CLI_ST_PRINT;
return 1;
}
+ SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
}
/* The deletion is done, send message. */
}
/* Clear all. */
+ SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
pat_ref_prune(appctx->ctx.map.ref);
+ SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
/* return response */
appctx->st0 = CLI_ST_PROMPT;
};
/* this struct is used to return information */
-static struct pattern static_pattern;
+static THREAD_LOCAL struct pattern static_pattern;
+static THREAD_LOCAL struct sample_data static_sample_data;
/* This is the root of the list of all available pattern_ref entries. */
struct list pattern_reference = LIST_HEAD_INIT(pattern_reference);
static struct lru64_head *pat_lru_tree;
+#ifdef USE_THREAD
+HA_SPINLOCK_T pat_lru_tree_lock;
+#endif
static unsigned long long pat_lru_seed;
/*
if (pat_lru_tree) {
unsigned long long seed = pat_lru_seed ^ (long)expr;
+ SPIN_LOCK(PATLRU_LOCK, &pat_lru_tree_lock);
lru = lru64_get(XXH64(smp->data.u.str.str, smp->data.u.str.len, seed),
pat_lru_tree, expr, expr->revision);
- if (lru && lru->domain)
- return lru->data;
+ if (!lru) {
+ SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+ }
+ else if (lru->domain) {
+ ret = lru->data;
+ SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+ return ret;
+ }
}
+
list_for_each_entry(lst, &expr->patterns, list) {
pattern = &lst->pat;
}
}
- if (lru)
- lru64_commit(lru, ret, expr, expr->revision, NULL);
+ if (lru) {
+ lru64_commit(lru, ret, expr, expr->revision, NULL);
+ SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+ }
return ret;
}
if (pat_lru_tree) {
unsigned long long seed = pat_lru_seed ^ (long)expr;
+ SPIN_LOCK(PATLRU_LOCK, &pat_lru_tree_lock);
lru = lru64_get(XXH64(smp->data.u.str.str, smp->data.u.str.len, seed),
pat_lru_tree, expr, expr->revision);
- if (lru && lru->domain)
- return lru->data;
+ if (!lru) {
+ SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+ }
+ else if (lru->domain) {
+ ret = lru->data;
+ SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+ return ret;
+ }
}
list_for_each_entry(lst, &expr->patterns, list) {
}
}
- if (lru)
- lru64_commit(lru, ret, expr, expr->revision, NULL);
+ if (lru) {
+ lru64_commit(lru, ret, expr, expr->revision, NULL);
+ SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+ }
return ret;
}
if (pat_lru_tree) {
unsigned long long seed = pat_lru_seed ^ (long)expr;
+ SPIN_LOCK(PATLRU_LOCK, &pat_lru_tree_lock);
lru = lru64_get(XXH64(smp->data.u.str.str, smp->data.u.str.len, seed),
pat_lru_tree, expr, expr->revision);
- if (lru && lru->domain)
- return lru->data;
+ if (!lru) {
+ SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+ }
+ else if (lru->domain) {
+ ret = lru->data;
+ SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+ return ret;
+ }
}
list_for_each_entry(lst, &expr->patterns, list) {
}
}
- if (lru)
- lru64_commit(lru, ret, expr, expr->revision, NULL);
+ if (lru) {
+ lru64_commit(lru, ret, expr, expr->revision, NULL);
+ SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+ }
return ret;
}
if (pat_lru_tree) {
unsigned long long seed = pat_lru_seed ^ (long)expr;
+ SPIN_LOCK(PATLRU_LOCK, &pat_lru_tree_lock);
lru = lru64_get(XXH64(smp->data.u.str.str, smp->data.u.str.len, seed),
pat_lru_tree, expr, expr->revision);
- if (lru && lru->domain)
- return lru->data;
+ if (!lru) {
+ SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+ }
+ else if (lru->domain) {
+ ret = lru->data;
+ SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+ return ret;
+ }
}
list_for_each_entry(lst, &expr->patterns, list) {
break;
}
- if (lru)
- lru64_commit(lru, ret, expr, expr->revision, NULL);
+ if (lru) {
+ lru64_commit(lru, ret, expr, expr->revision, NULL);
+ SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+ }
return ret;
}
if (pat_lru_tree) {
unsigned long long seed = pat_lru_seed ^ (long)expr;
+ SPIN_LOCK(PATLRU_LOCK, &pat_lru_tree_lock);
lru = lru64_get(XXH64(smp->data.u.str.str, smp->data.u.str.len, seed),
pat_lru_tree, expr, expr->revision);
- if (lru && lru->domain)
- return lru->data;
+ if (!lru) {
+ SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+ }
+ else if (lru->domain) {
+ ret = lru->data;
+ SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+ return ret;
+ }
}
list_for_each_entry(lst, &expr->patterns, list) {
break;
}
- if (lru)
- lru64_commit(lru, ret, expr, expr->revision, NULL);
+ if (lru) {
+ lru64_commit(lru, ret, expr, expr->revision, NULL);
+ SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+ }
return ret;
}
if (pat_lru_tree) {
unsigned long long seed = pat_lru_seed ^ (long)expr;
+ SPIN_LOCK(PATLRU_LOCK, &pat_lru_tree_lock);
lru = lru64_get(XXH64(smp->data.u.str.str, smp->data.u.str.len, seed),
pat_lru_tree, expr, expr->revision);
- if (lru && lru->domain)
- return lru->data;
+ if (!lru) {
+ SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+ }
+ else if (lru->domain) {
+ ret = lru->data;
+ SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+ return ret;
+ }
}
list_for_each_entry(lst, &expr->patterns, list) {
}
}
leave:
- if (lru)
- lru64_commit(lru, ret, expr, expr->revision, NULL);
+ if (lru) {
+ lru64_commit(lru, ret, expr, expr->revision, NULL);
+ SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+ }
return ret;
}
list_for_each_entry(expr, &ref->pat, list)
pattern_delete(expr, elt);
+	/* the pat_ref_elt is freed only once all expressions are
+	   cleaned and no reference to it remains */
LIST_DEL(&elt->list);
free(elt->sample);
free(elt->pattern);
list_for_each_entry(expr, &ref->pat, list)
pattern_delete(expr, elt);
+	/* the pat_ref_elt is freed only once all expressions are
+	   cleaned and no reference to it remains */
LIST_DEL(&elt->list);
free(elt->sample);
free(elt->pattern);
memprintf(err, "out of memory error");
return 0;
}
- free(elt->sample);
- elt->sample = sample;
-
	/* Load the sample into each reference. All the conversions are
	 * tested below; normally these calls don't fail.
	 */
if (!expr->pat_head->parse_smp)
continue;
+ RWLOCK_WRLOCK(PATEXP_LOCK, &expr->lock);
data = pattern_find_smp(expr, elt);
if (data && *data && !expr->pat_head->parse_smp(sample, *data))
*data = NULL;
+ RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock);
}
+ /* free old sample only when all exprs are updated */
+ free(elt->sample);
+ elt->sample = sample;
+
+
return 1;
}
LIST_INIT(&ref->head);
LIST_INIT(&ref->pat);
-
+ SPIN_INIT(&ref->lock);
LIST_ADDQ(&pattern_reference, &ref->list);
return ref;
return 0;
}
+ RWLOCK_WRLOCK(PATEXP_LOCK, &expr->lock);
/* index pattern */
if (!expr->pat_head->index(expr, &pattern, err)) {
+ RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock);
free(data);
return 0;
}
+ RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock);
return 1;
}
return 0;
}
}
+
return 1;
}
void pat_ref_reload(struct pat_ref *ref, struct pat_ref *replace)
{
struct pattern_expr *expr;
- struct pat_ref_elt *elt;
char *err = NULL;
+ struct pat_ref_elt *elt, *safe;
+ struct bref *bref, *back;
+ struct sample_data *data;
+ struct pattern pattern;
- pat_ref_prune(ref);
+ SPIN_LOCK(PATREF_LOCK, &ref->lock);
+ list_for_each_entry(expr, &ref->pat, list) {
+ RWLOCK_WRLOCK(PATEXP_LOCK, &expr->lock);
+ }
+
+ /* all expr are locked, we can safely remove all pat_ref */
+ list_for_each_entry_safe(elt, safe, &ref->head, list) {
+ list_for_each_entry_safe(bref, back, &elt->back_refs, users) {
+ /*
+ * we have to unlink all watchers. We must not relink them if
+ * this elt was the last one in the list.
+ */
+ LIST_DEL(&bref->users);
+ LIST_INIT(&bref->users);
+ if (elt->list.n != &ref->head)
+ LIST_ADDQ(&LIST_ELEM(elt->list.n, struct stream *, list)->back_refs, &bref->users);
+ bref->ref = elt->list.n;
+ }
+ LIST_DEL(&elt->list);
+ free(elt->pattern);
+ free(elt->sample);
+ free(elt);
+ }
+
+	/* switch pat_ref_elt lists */
LIST_ADD(&replace->head, &ref->head);
LIST_DEL(&replace->head);
- list_for_each_entry(elt, &ref->head, list) {
- list_for_each_entry(expr, &ref->pat, list) {
- if (!pat_ref_push(elt, expr, 0, &err)) {
+ list_for_each_entry(expr, &ref->pat, list) {
+ expr->pat_head->prune(expr);
+ list_for_each_entry(elt, &ref->head, list) {
+ /* Create sample */
+ if (elt->sample && expr->pat_head->parse_smp) {
+ /* New sample. */
+ data = malloc(sizeof(*data));
+ if (!data)
+ continue;
+
+ /* Parse value. */
+ if (!expr->pat_head->parse_smp(elt->sample, data)) {
+ memprintf(&err, "unable to parse '%s'", elt->sample);
+ send_log(NULL, LOG_NOTICE, "%s", err);
+ free(err);
+ free(data);
+ continue;
+ }
+
+ }
+ else
+ data = NULL;
+
+ /* initialise pattern */
+ memset(&pattern, 0, sizeof(pattern));
+ pattern.data = data;
+ pattern.ref = elt;
+
+ /* parse pattern */
+ if (!expr->pat_head->parse(elt->pattern, &pattern, expr->mflags, &err)) {
send_log(NULL, LOG_NOTICE, "%s", err);
free(err);
- err = NULL;
+ free(data);
+ continue;
+ }
+
+ /* index pattern */
+ if (!expr->pat_head->index(expr, &pattern, &err)) {
+ send_log(NULL, LOG_NOTICE, "%s", err);
+ free(err);
+ free(data);
+ continue;
}
}
+ RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock);
}
+ SPIN_UNLOCK(PATREF_LOCK, &ref->lock);
}
/* This function prune all entries of <ref>. This function
struct pattern_expr *expr;
struct bref *bref, *back;
+ list_for_each_entry(expr, &ref->pat, list) {
+ RWLOCK_WRLOCK(PATEXP_LOCK, &expr->lock);
+ expr->pat_head->prune(expr);
+ RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock);
+ }
+
+	/* we trash the pat_ref_elt entries in a second pass to ensure the data
+	   is freed only once no reference to it remains */
list_for_each_entry_safe(elt, safe, &ref->head, list) {
list_for_each_entry_safe(bref, back, &elt->back_refs, users) {
/*
free(elt);
}
- list_for_each_entry(expr, &ref->pat, list)
- expr->pat_head->prune(expr);
+
}
/* This function lookup for existing reference <ref> in pattern_head <head>. */
expr->ref = ref;
+ RWLOCK_INIT(&expr->lock);
+
/* We must free this pattern if it is no more used. */
list->do_free = 1;
}
return NULL;
list_for_each_entry(list, &head->head, list) {
+ RWLOCK_RDLOCK(PATEXP_LOCK, &list->expr->lock);
pat = head->match(smp, list->expr, fill);
- if (pat)
+ if (pat) {
+ /* We duplicate the pattern cause it could be modified
+ by another thread */
+ if (pat != &static_pattern) {
+ memcpy(&static_pattern, pat, sizeof(struct pattern));
+ pat = &static_pattern;
+ }
+
+ /* We also duplicate the sample data for
+ same reason */
+ if (pat->data && (pat->data != &static_sample_data)) {
+ switch(pat->type) {
+ case SMP_T_STR:
+ static_sample_data.type = SMP_T_STR;
+ static_sample_data.u.str = *get_trash_chunk();
+ static_sample_data.u.str.len = pat->data->u.str.len;
+ if (static_sample_data.u.str.len >= static_sample_data.u.str.size)
+ static_sample_data.u.str.len = static_sample_data.u.str.size - 1;
+ memcpy(static_sample_data.u.str.str, pat->data->u.str.str, static_sample_data.u.str.len);
+ static_sample_data.u.str.str[static_sample_data.u.str.len] = 0;
+ case SMP_T_IPV4:
+ case SMP_T_IPV6:
+ case SMP_T_SINT:
+ memcpy(&static_sample_data, pat->data, sizeof(struct sample_data));
+ default:
+ pat->data = NULL;
+ }
+ pat->data = &static_sample_data;
+ }
+ RWLOCK_RDUNLOCK(PATEXP_LOCK, &list->expr->lock);
return pat;
+ }
+ RWLOCK_RDUNLOCK(PATEXP_LOCK, &list->expr->lock);
}
return NULL;
}
LIST_DEL(&list->list);
if (list->do_free) {
LIST_DEL(&list->expr->list);
+ RWLOCK_WRLOCK(PATEXP_LOCK, &list->expr->lock);
head->prune(list->expr);
+ RWLOCK_WRUNLOCK(PATEXP_LOCK, &list->expr->lock);
free(list->expr);
}
free(list);
*/
/* Deletes from expression <expr> every pattern attached to reference element
 * <ref>, delegating to the match family's delete callback. The '+' lines are
 * this patch's addition: the expression is write-locked for the duration of
 * the deletion. Always returns 1.
 */
int pattern_delete(struct pattern_expr *expr, struct pat_ref_elt *ref)
{
+	RWLOCK_WRLOCK(PATEXP_LOCK, &expr->lock);
	expr->pat_head->delete(expr, ref);
+	RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock);
	return 1;
}
struct list pr = LIST_HEAD_INIT(pr);
pat_lru_seed = random();
- if (global.tune.pattern_cache)
+ if (global.tune.pattern_cache) {
pat_lru_tree = lru64_new(global.tune.pattern_cache);
+ SPIN_INIT(&pat_lru_tree_lock);
+ }
list_for_each_entry(ref, &pattern_reference, list) {
if (ref->unique_id == -1) {