#if defined(AST_MISDN_ENHANCEMENTS)
if (bc->is_register_pool) {
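+ /* Hold the stack lock while hunting for a free slot so two threads cannot reserve the same channel. */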
+ pthread_mutex_lock(&stack->st_lock);
for (i = MAX_BCHANS + 1; i < ARRAY_LEN(stack->channels); ++i) {
if (!stack->channels[i]) {
chan = i + 1;
--channel;
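+ /* The normal B channel hunt below is likewise done under st_lock. */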
+ pthread_mutex_lock(&stack->st_lock);
bnums = stack->pri ? stack->b_num : stack->b_num - 1;
if (dec) {
for (i = bnums; i >= 0; --i) {
}
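+ /* From here on, every error path must drop st_lock before returning. */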
if (!chan) {
+ pthread_mutex_unlock(&stack->st_lock);
cb_log(1, stack->port, " !! NO FREE CHAN IN STACK\n");
dump_chan_list(stack);
bc->out_cause = AST_CAUSE_NORMAL_CIRCUIT_CONGESTION;
return -1;
}
if (set_chan_in_stack(stack, chan) < 0) {
+ pthread_mutex_unlock(&stack->st_lock);
cb_log(0, stack->port, "Channel Already in use:%d\n", chan);
bc->out_cause = AST_CAUSE_REQUESTED_CHAN_UNAVAIL;
return -1;
}
+ pthread_mutex_unlock(&stack->st_lock);
bc->channel = chan;
return 0;
if (global_state == MISDN_INITIALIZED) {
for (i = 0; i <= stack->b_num; ++i) {
cb_event(EVENT_CLEANUP, &stack->bc[i], NULL);
- empty_chan_in_stack(stack, i + 1);
empty_bc(&stack->bc[i]);
clean_up_bc(&stack->bc[i]);
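+ /* Give the channel back to the stack only after the bc record has been emptied and cleaned up. */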
+ empty_chan_in_stack(stack, i + 1);
stack->bc[i].in_use = 0;
}
#if defined(AST_MISDN_ENHANCEMENTS)
for (i = MAX_BCHANS + 1; i < ARRAY_LEN(stack->bc); ++i) {
- empty_chan_in_stack(stack, i + 1);
empty_bc(&stack->bc[i]);
+ empty_chan_in_stack(stack, i + 1);
stack->bc[i].in_use = 0;
}
#endif /* defined(AST_MISDN_ENHANCEMENTS) */
msg_queue_init(&stack->downqueue);
msg_queue_init(&stack->upqueue);
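+ /* Protects this stack's channel allocation; destroyed again when the stack is torn down. */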
+ pthread_mutex_init(&stack->st_lock, NULL);
+
/* query port's requirements */
ret = mISDN_get_stack_info(midev, port, buff, sizeof(buff));
if (ret < 0) {
if (stack->upper_id)
mISDN_write_frame(stack->midev, buf, stack->upper_id, MGR_DELLAYER | REQUEST, 0, 0, NULL, TIMEOUT_1SEC);
+
+ pthread_mutex_destroy(&stack->st_lock);
}
int i;
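+ /* Skip bc records that are not in use so a stale l3_id cannot match. */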
for (i = 0; i <= stack->b_num; ++i) {
- if ((stack->bc[i].l3_id & mask) == (l3id & mask)) {
+ if (stack->bc[i].in_use && (stack->bc[i].l3_id & mask) == (l3id & mask)) {
return &stack->bc[i];
}
}
#if defined(AST_MISDN_ENHANCEMENTS)
/* Search the B channel records for a REGISTER signaling link. */
for (i = MAX_BCHANS + 1; i < ARRAY_LEN(stack->bc); ++i) {
- if ((stack->bc[i].l3_id & mask) == (l3id & mask)) {
+ if (stack->bc[i].in_use && (stack->bc[i].l3_id & mask) == (l3id & mask)) {
return &stack->bc[i];
}
}
int i;
for (i = 0; i <= stack->b_num; ++i) {
- if (stack->bc[i].l3_id == l3id) {
+ if (stack->bc[i].in_use && stack->bc[i].l3_id == l3id) {
return &stack->bc[i];
}
}
#if defined(AST_MISDN_ENHANCEMENTS)
/* Search the B channel records for a REGISTER signaling link. */
for (i = MAX_BCHANS + 1; i < ARRAY_LEN(stack->bc); ++i) {
- if (stack->bc[i].l3_id == l3id) {
+ if (stack->bc[i].in_use && stack->bc[i].l3_id == l3id) {
return &stack->bc[i];
}
}
struct misdn_stack *stack;
int i;
- for (stack=glob_mgr->stack_list;
- stack;
- stack=stack->next) {
- for (i=0; i<=stack->b_num; i++) {
- if ( (stack->bc[i].addr&STACK_ID_MASK)==(addr&STACK_ID_MASK) || stack->bc[i].layer_id== addr ) {
+ for (stack = glob_mgr->stack_list; stack; stack = stack->next) {
+ for (i = 0; i <= stack->b_num; i++) {
+ if (stack->bc[i].in_use
+ && ((stack->bc[i].addr & STACK_ID_MASK) == (addr & STACK_ID_MASK)
+ || stack->bc[i].layer_id == addr)) {
return &stack->bc[i];
}
}
struct misdn_stack *stack;
int i;
- for (stack=glob_mgr->stack_list;
- stack;
- stack=stack->next) {
- for (i=0; i<=stack->b_num; i++) {
- if ( stack->bc[i].conf_id==confid ) {
+ for (stack = glob_mgr->stack_list; stack; stack = stack->next) {
+ for (i = 0; i <= stack->b_num; i++) {
+ if (stack->bc[i].in_use && stack->bc[i].conf_id == confid) {
return &stack->bc[i];
}
}
struct misdn_stack *stack = find_stack_by_port(port);
int i;
- if (!stack) return NULL;
+ if (!stack) {
+ return NULL;
+ }
- for (i=0; i<=stack->b_num; i++) {
- if ( stack->bc[i].channel== channel ) {
+ for (i = 0; i <= stack->b_num; i++) {
+ if (stack->bc[i].in_use && stack->bc[i].channel == channel) {
return &stack->bc[i];
}
}
/* Empties bc if it's reserved (no SETUP out yet) */
void misdn_lib_release(struct misdn_bchannel *bc)
{
+ int channel;
struct misdn_stack *stack=get_stack_by_bc(bc);
if (!stack) {
return;
}
- if (bc->channel>0)
- empty_chan_in_stack(stack,bc->channel);
-
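+ /* Save the channel number now; the bc record is wiped before the channel is handed back to the stack. */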
+ channel = bc->channel;
empty_bc(bc);
clean_up_bc(bc);
+ if (channel > 0) {
+ empty_chan_in_stack(stack, channel);
+ }
bc->in_use=0;
}
bc->cause = tmpcause;
bc->out_cause = tmp_out_cause;
clean_up_bc(bc);
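+ /* The bc record itself is finished; mark it free before sorting out the channel below. */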
+ bc->in_use = 0;
if (tmpcause == AST_CAUSE_REQUESTED_CHAN_UNAVAIL) {
- cb_log(0, stack->port, "**** Received CAUSE:%d, so not cleaning up channel %d\n", AST_CAUSE_REQUESTED_CHAN_UNAVAIL, channel);
- cb_log(0, stack->port, "**** This channel is now no longer available,\nplease try to restart it with 'misdn send restart <port> <channel>'\n");
- set_chan_in_stack(stack, channel);
- bc->channel = channel;
+ cb_log(0, stack->port, "**** Received CAUSE:%d, restarting channel %d\n", AST_CAUSE_REQUESTED_CHAN_UNAVAIL, channel);
misdn_lib_send_restart(stack->port, channel);
- } else if (channel > 0) {
+ }
+ if (channel > 0) {
empty_chan_in_stack(stack, channel);
}
- bc->in_use = 0;
break;
case EVENT_RESTART:
- cb_log(0, stack->port, "**** Received RESTART_ACK channel:%d\n", bc->restart_channel);
+ cb_log(0, stack->port, "**** Received RESTART channel:%d\n", bc->restart_channel);
empty_chan_in_stack(stack, bc->restart_channel);
break;
default:
struct misdn_stack *stack;
int i;
- for (stack=glob_mgr->stack_list;
- stack;
- stack=stack->next) {
- for (i=0; i<=stack->b_num; i++)
- if (stack->bc[i].pid == pid) return &stack->bc[i];
+ for (stack = glob_mgr->stack_list; stack; stack = stack->next) {
+ for (i = 0; i <= stack->b_num; i++) {
+ if (stack->bc[i].in_use && stack->bc[i].pid == pid) {
+ return &stack->bc[i];
+ }
+ }
}
return NULL;
{
bc->channel = channel;
bc->channel_preselected = channel?1:0;
- bc->in_use = 1;
bc->need_disconnect=1;
bc->need_release=1;
bc->need_release_complete=1;
bc->b_stid=0;
bc->layer_id=0;
#endif
+
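+ /* Flag the record as busy only after it is fully set up, so the find/get routines never hand out a half-initialized bc. */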
+ bc->in_use = 1;
}
struct misdn_bchannel *misdn_lib_get_free_bc(int port, int channel, int inout, int dec)
return NULL;
}
- usleep(1000);
-
/* Find the port stack structure */
stack = find_stack_by_port(port);
if (!stack) {
return NULL;
}
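+ /* The channel hunt and prepare_bc() run under st_lock; every return path below unlocks first. */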
+ pthread_mutex_lock(&stack->st_lock);
if (channel > 0) {
if (channel <= stack->b_num) {
for (i = 0; i < stack->b_num; i++) {
if (stack->bc[i].channel == channel) {
if (test_inuse(&stack->bc[i])) {
+ pthread_mutex_unlock(&stack->st_lock);
cb_log(0, port, "Requested channel:%d on port:%d is already in use\n", channel, port);
return NULL;
} else {
prepare_bc(&stack->bc[i], channel);
+ pthread_mutex_unlock(&stack->st_lock);
return &stack->bc[i];
}
}
}
} else {
+ pthread_mutex_unlock(&stack->st_lock);
cb_log(0, port, "Requested channel:%d is out of bounds on port:%d\n", channel, port);
return NULL;
}
prepare_bc(&stack->bc[i], channel);
stack->bc[i].dec = 1;
+ pthread_mutex_unlock(&stack->st_lock);
return &stack->bc[i];
}
}
}
prepare_bc(&stack->bc[i], channel);
+ pthread_mutex_unlock(&stack->st_lock);
return &stack->bc[i];
}
}
}
+ pthread_mutex_unlock(&stack->st_lock);
cb_log(1, port, "There is no free channel on port (%d)\n", port);
return NULL;
return NULL;
}
+ pthread_mutex_lock(&stack->st_lock);
for (index = MAX_BCHANS + 1; index < ARRAY_LEN(stack->bc); ++index) {
bc = &stack->bc[index];
if (!test_inuse(bc)) {
prepare_bc(bc, 0);
bc->need_disconnect = 0;
bc->need_release = 0;
+ pthread_mutex_unlock(&stack->st_lock);
return bc;
}
}
+ pthread_mutex_unlock(&stack->st_lock);
cb_log(1, port, "There is no free REGISTER link on port (%d)\n", port);
return NULL;
/* clean up chan in stack, to be sure we don't think it's
* in use anymore */
for (cnt=0; cnt<=stack->b_num; cnt++) {
- if (stack->bc[cnt].channel == channel) {
+ if (stack->bc[cnt].in_use && stack->bc[cnt].channel == channel) {
empty_bc(&stack->bc[cnt]);
clean_up_bc(&stack->bc[cnt]);
stack->bc[cnt].in_use=0;