/*
 * strongSwan: src/libcharon/sa/ike_sa_manager.c
 * (state as of commit "ike-sa-manager: Remove IKE_SA checkout by CHILD_SA reqid")
 */
1 /*
2 * Copyright (C) 2005-2011 Martin Willi
3 * Copyright (C) 2011 revosec AG
4 * Copyright (C) 2008-2012 Tobias Brunner
5 * Copyright (C) 2005 Jan Hutter
6 * Hochschule fuer Technik Rapperswil
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 * for more details.
17 */
18
19 #include <string.h>
20
21 #include "ike_sa_manager.h"
22
23 #include <daemon.h>
24 #include <sa/ike_sa_id.h>
25 #include <bus/bus.h>
26 #include <threading/condvar.h>
27 #include <threading/mutex.h>
28 #include <threading/rwlock.h>
29 #include <collections/linked_list.h>
30 #include <crypto/hashers/hasher.h>
31 #include <processing/jobs/delete_ike_sa_job.h>
32
33 /* the default size of the hash table (MUST be a power of 2) */
34 #define DEFAULT_HASHTABLE_SIZE 1
35
36 /* the maximum size of the hash table (MUST be a power of 2) */
37 #define MAX_HASHTABLE_SIZE (1 << 30)
38
39 /* the default number of segments (MUST be a power of 2) */
40 #define DEFAULT_SEGMENT_COUNT 1
41
typedef struct entry_t entry_t;

/**
 * An entry in the linked list, contains IKE_SA, locking and lookup data.
 */
struct entry_t {

	/**
	 * Number of threads waiting for this ike_sa_t object.
	 */
	int waiting_threads;

	/**
	 * Condvar where threads can wait until ike_sa_t object is free for use again.
	 */
	condvar_t *condvar;

	/**
	 * Is this ike_sa currently checked out?
	 */
	bool checked_out;

	/**
	 * Does this SA drive out new threads? If TRUE, no thread may newly
	 * acquire this entry (e.g. while it is being deleted).
	 */
	bool driveout_new_threads;

	/**
	 * Does this SA drive out waiting threads? If TRUE, threads blocked on
	 * the condvar give up instead of checking the entry out.
	 */
	bool driveout_waiting_threads;

	/**
	 * Identification of an IKE_SA (SPIs).
	 */
	ike_sa_id_t *ike_sa_id;

	/**
	 * The contained ike_sa_t object.
	 */
	ike_sa_t *ike_sa;

	/**
	 * hash of the IKE_SA_INIT message, used to detect retransmissions
	 */
	chunk_t init_hash;

	/**
	 * remote host address, required for DoS detection and duplicate
	 * checking (host with same my_id and other_id is *not* considered
	 * a duplicate if the address family differs)
	 */
	host_t *other;

	/**
	 * As responder: Is this SA half-open?
	 */
	bool half_open;

	/**
	 * own identity, required for duplicate checking
	 */
	identification_t *my_id;

	/**
	 * remote identity, required for duplicate checking
	 */
	identification_t *other_id;

	/**
	 * message ID or hash of currently processing message, -1 if none
	 * (stored in a u_int32_t, so -1 wraps to UINT32_MAX as "none" marker)
	 */
	u_int32_t processing;
};
116
/**
 * Destroys an entry_t object along with all owned data, including the
 * contained IKE_SA itself.
 */
static status_t entry_destroy(entry_t *this)
{
	/* also destroy IKE SA */
	this->ike_sa->destroy(this->ike_sa);
	this->ike_sa_id->destroy(this->ike_sa_id);
	chunk_free(&this->init_hash);
	/* lookup data is optional, may never have been set */
	DESTROY_IF(this->other);
	DESTROY_IF(this->my_id);
	DESTROY_IF(this->other_id);
	this->condvar->destroy(this->condvar);
	free(this);
	return SUCCESS;
}
133
/**
 * Creates a new entry for the ike_sa_t list. The IKE_SA and its ID are not
 * set here; the caller assigns them before inserting the entry.
 */
static entry_t *entry_create()
{
	entry_t *this;

	INIT(this,
		.condvar = condvar_create(CONDVAR_TYPE_DEFAULT),
		/* no message currently being processed */
		.processing = -1,
	);

	return this;
}
148
149 /**
150 * Function that matches entry_t objects by ike_sa_id_t.
151 */
152 static bool entry_match_by_id(entry_t *entry, ike_sa_id_t *id)
153 {
154 if (id->equals(id, entry->ike_sa_id))
155 {
156 return TRUE;
157 }
158 if ((id->get_responder_spi(id) == 0 ||
159 entry->ike_sa_id->get_responder_spi(entry->ike_sa_id) == 0) &&
160 id->get_initiator_spi(id) == entry->ike_sa_id->get_initiator_spi(entry->ike_sa_id))
161 {
162 /* this is TRUE for IKE_SAs that we initiated but have not yet received a response */
163 return TRUE;
164 }
165 return FALSE;
166 }
167
/**
 * Function that matches entry_t objects by ike_sa_t pointers.
 */
static bool entry_match_by_sa(entry_t *entry, ike_sa_t *ike_sa)
{
	/* pure pointer identity, both must refer to the same object */
	return entry->ike_sa == ike_sa;
}
175
176 /**
177 * Hash function for ike_sa_id_t objects.
178 */
179 static u_int ike_sa_id_hash(ike_sa_id_t *ike_sa_id)
180 {
181 /* IKEv2 does not mandate random SPIs (RFC 5996, 2.6), they just have to be
182 * locally unique, so we use our randomly allocated SPI whether we are
183 * initiator or responder to ensure a good distribution. The latter is not
184 * possible for IKEv1 as we don't know whether we are original initiator or
185 * not (based on the IKE header). But as RFC 2408, section 2.5.3 proposes
186 * SPIs (Cookies) to be allocated near random (we allocate them randomly
187 * anyway) it seems safe to always use the initiator SPI. */
188 if (ike_sa_id->get_ike_version(ike_sa_id) == IKEV1_MAJOR_VERSION ||
189 ike_sa_id->is_initiator(ike_sa_id))
190 {
191 return ike_sa_id->get_initiator_spi(ike_sa_id);
192 }
193 return ike_sa_id->get_responder_spi(ike_sa_id);
194 }
195
typedef struct half_open_t half_open_t;

/**
 * Struct to manage half-open IKE_SAs per peer (keyed by remote address).
 */
struct half_open_t {
	/** chunk of remote host address (cloned, owned by this struct) */
	chunk_t other;

	/** the number of half-open IKE_SAs with that host */
	u_int count;
};
208
/**
 * Destroys a half_open_t object, releasing the cloned address chunk.
 */
static void half_open_destroy(half_open_t *this)
{
	chunk_free(&this->other);
	free(this);
}
217
typedef struct connected_peers_t connected_peers_t;

/**
 * Struct to track established IKE_SAs between two identities (per address
 * family), used for duplicate checking.
 */
struct connected_peers_t {
	/** own identity */
	identification_t *my_id;

	/** remote identity */
	identification_t *other_id;

	/** ip address family of peer */
	int family;

	/** list of ike_sa_id_t objects of IKE_SAs between the two identities */
	linked_list_t *sas;
};
233
/**
 * Destroys a connected_peers_t object. The SA list itself is destroyed; its
 * ike_sa_id_t items are removed/destroyed in remove_connected_peers().
 */
static void connected_peers_destroy(connected_peers_t *this)
{
	this->my_id->destroy(this->my_id);
	this->other_id->destroy(this->other_id);
	this->sas->destroy(this->sas);
	free(this);
}
241
242 /**
243 * Function that matches connected_peers_t objects by the given ids.
244 */
245 static inline bool connected_peers_match(connected_peers_t *connected_peers,
246 identification_t *my_id, identification_t *other_id,
247 int family)
248 {
249 return my_id->equals(my_id, connected_peers->my_id) &&
250 other_id->equals(other_id, connected_peers->other_id) &&
251 (!family || family == connected_peers->family);
252 }
253
typedef struct init_hash_t init_hash_t;

/**
 * Struct mapping the hash of an initial IKE message to the local SPI we
 * allocated for it, used to detect retransmits of that message.
 */
struct init_hash_t {
	/** hash of IKE_SA_INIT or initial phase1 message (data is not cloned) */
	chunk_t hash;

	/** our SPI allocated for the IKE_SA based on this message */
	u_int64_t our_spi;
};
263
typedef struct segment_t segment_t;

/**
 * Struct to manage segments of the hash table (a segment covers every
 * segment_count-th table row and is locked as a unit).
 */
struct segment_t {
	/** mutex to access a segment exclusively */
	mutex_t *mutex;

	/** the number of entries in this segment */
	u_int count;
};
276
typedef struct shareable_segment_t shareable_segment_t;

/**
 * Struct to manage segments of the "half-open" and "connected peers" hash tables.
 */
struct shareable_segment_t {
	/** rwlock to access a segment non-/exclusively */
	rwlock_t *lock;

	/** the number of entries in this segment - in case of the "half-open table"
	 * it's the sum of all half_open_t.count in a segment. */
	u_int count;
};
290
typedef struct table_item_t table_item_t;

/**
 * Instead of using linked_list_t for each bucket we store the data in our own
 * singly-linked overflow list to save memory.
 */
struct table_item_t {
	/** data of this item */
	void *value;

	/** next item in the overflow list, NULL at the end of a bucket */
	table_item_t *next;
};
304
typedef struct private_ike_sa_manager_t private_ike_sa_manager_t;

/**
 * Additional private members of ike_sa_manager_t.
 */
struct private_ike_sa_manager_t {
	/**
	 * Public interface of ike_sa_manager_t.
	 */
	ike_sa_manager_t public;

	/**
	 * Hash table with entries for the ike_sa_t objects.
	 */
	table_item_t **ike_sa_table;

	/**
	 * The size of the hash table.
	 */
	u_int table_size;

	/**
	 * Mask to map the hashes to table rows
	 * (presumably table_size - 1 with table_size a power of 2 — see the
	 * power-of-2 requirement on DEFAULT_HASHTABLE_SIZE).
	 */
	u_int table_mask;

	/**
	 * Segments of the hash table.
	 */
	segment_t *segments;

	/**
	 * The number of segments.
	 */
	u_int segment_count;

	/**
	 * Mask to map a table row to a segment.
	 */
	u_int segment_mask;

	/**
	 * Hash table with half_open_t objects.
	 */
	table_item_t **half_open_table;

	/**
	 * Segments of the "half-open" hash table.
	 */
	shareable_segment_t *half_open_segments;

	/**
	 * Total number of half-open IKE_SAs (updated atomically via ref_get/put).
	 */
	refcount_t half_open_count;

	/**
	 * Hash table with connected_peers_t objects.
	 */
	table_item_t **connected_peers_table;

	/**
	 * Segments of the "connected peers" hash table.
	 */
	shareable_segment_t *connected_peers_segments;

	/**
	 * Hash table with init_hash_t objects.
	 */
	table_item_t **init_hashes_table;

	/**
	 * Segments of the "hashes" hash table.
	 */
	segment_t *init_hashes_segments;

	/**
	 * RNG to get random SPIs for our side
	 */
	rng_t *rng;

	/**
	 * reuse existing IKE_SAs in checkout_by_config
	 */
	bool reuse_ikesa;

	/**
	 * Configured IKE_SA limit, if any (0 means unlimited)
	 */
	u_int ikesa_limit;
};
396
397 /**
398 * Acquire a lock to access the segment of the table row with the given index.
399 * It also works with the segment index directly.
400 */
401 static inline void lock_single_segment(private_ike_sa_manager_t *this,
402 u_int index)
403 {
404 mutex_t *lock = this->segments[index & this->segment_mask].mutex;
405 lock->lock(lock);
406 }
407
408 /**
409 * Release the lock required to access the segment of the table row with the given index.
410 * It also works with the segment index directly.
411 */
412 static inline void unlock_single_segment(private_ike_sa_manager_t *this,
413 u_int index)
414 {
415 mutex_t *lock = this->segments[index & this->segment_mask].mutex;
416 lock->unlock(lock);
417 }
418
419 /**
420 * Lock all segments
421 */
422 static void lock_all_segments(private_ike_sa_manager_t *this)
423 {
424 u_int i;
425
426 for (i = 0; i < this->segment_count; i++)
427 {
428 this->segments[i].mutex->lock(this->segments[i].mutex);
429 }
430 }
431
432 /**
433 * Unlock all segments
434 */
435 static void unlock_all_segments(private_ike_sa_manager_t *this)
436 {
437 u_int i;
438
439 for (i = 0; i < this->segment_count; i++)
440 {
441 this->segments[i].mutex->unlock(this->segments[i].mutex);
442 }
443 }
444
typedef struct private_enumerator_t private_enumerator_t;

/**
 * hash table enumerator implementation
 *
 * Invariant: while "current" is non-NULL, the enumerator holds the lock of
 * the segment it is currently in (released in enumerate()/destroy()).
 */
struct private_enumerator_t {

	/**
	 * implements enumerator interface
	 */
	enumerator_t enumerator;

	/**
	 * associated ike_sa_manager_t
	 */
	private_ike_sa_manager_t *manager;

	/**
	 * current segment index
	 */
	u_int segment;

	/**
	 * currently enumerating entry (condvar is signaled when moving on)
	 */
	entry_t *entry;

	/**
	 * current table row index
	 */
	u_int row;

	/**
	 * current table item
	 */
	table_item_t *current;

	/**
	 * previous table item (needed by remove_entry_at() to unlink "current")
	 */
	table_item_t *prev;
};
487
METHOD(enumerator_t, enumerate, bool,
	private_enumerator_t *this, entry_t **entry, u_int *segment)
{
	if (this->entry)
	{
		/* release threads blocked on the previously enumerated entry */
		this->entry->condvar->signal(this->entry->condvar);
		this->entry = NULL;
	}
	while (this->segment < this->manager->segment_count)
	{
		while (this->row < this->manager->table_size)
		{
			this->prev = this->current;
			if (this->current)
			{
				this->current = this->current->next;
			}
			else
			{
				/* entering a new row: take the segment lock; it is held
				 * until the row is exhausted or the enumerator is destroyed */
				lock_single_segment(this->manager, this->segment);
				this->current = this->manager->ike_sa_table[this->row];
			}
			if (this->current)
			{
				*entry = this->entry = this->current->value;
				*segment = this->segment;
				return TRUE;
			}
			unlock_single_segment(this->manager, this->segment);
			/* rows of a segment are interleaved with stride segment_count */
			this->row += this->manager->segment_count;
		}
		this->segment++;
		this->row = this->segment;
	}
	return FALSE;
}
524
METHOD(enumerator_t, enumerator_destroy, void,
	private_enumerator_t *this)
{
	if (this->entry)
	{
		/* wake up threads blocked on the entry enumerated last */
		this->entry->condvar->signal(this->entry->condvar);
	}
	if (this->current)
	{
		/* an interrupted enumeration still holds the segment lock */
		unlock_single_segment(this->manager, this->segment);
	}
	free(this);
}
538
/**
 * Creates an enumerator to enumerate the entries in the hash table.
 * Enumerated values are (entry_t*, segment index) pairs.
 */
static enumerator_t* create_table_enumerator(private_ike_sa_manager_t *this)
{
	private_enumerator_t *enumerator;

	INIT(enumerator,
		.enumerator = {
			.enumerate = (void*)_enumerate,
			.destroy = _enumerator_destroy,
		},
		.manager = this,
	);
	return &enumerator->enumerator;
}
555
/**
 * Put an entry into the hash table.
 * Note: The caller has to unlock the returned segment.
 */
static u_int put_entry(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *current, *item;
	u_int row, segment;

	INIT(item,
		.value = entry,
	);

	row = ike_sa_id_hash(entry->ike_sa_id) & this->table_mask;
	segment = row & this->segment_mask;

	/* segment stays locked so the caller can mark the entry checked out
	 * before any other thread can find it */
	lock_single_segment(this, segment);
	current = this->ike_sa_table[row];
	if (current)
	{ /* insert at the front of current bucket */
		item->next = current;
	}
	this->ike_sa_table[row] = item;
	this->segments[segment].count++;
	return segment;
}
582
/**
 * Remove an entry from the hash table.
 * Note: The caller MUST have a lock on the segment of this entry.
 * The entry itself is not destroyed, only unlinked from the bucket.
 */
static void remove_entry(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item, *prev = NULL;
	u_int row, segment;

	row = ike_sa_id_hash(entry->ike_sa_id) & this->table_mask;
	segment = row & this->segment_mask;
	item = this->ike_sa_table[row];
	while (item)
	{
		if (item->value == entry)
		{
			if (prev)
			{
				prev->next = item->next;
			}
			else
			{	/* entry was the bucket head */
				this->ike_sa_table[row] = item->next;
			}
			this->segments[segment].count--;
			free(item);
			break;
		}
		prev = item;
		item = item->next;
	}
}
615
/**
 * Remove the entry at the current enumerator position.
 * The enumerator's segment lock is released when the removed item was the
 * bucket head; enumerate() will re-acquire it when continuing.
 */
static void remove_entry_at(private_enumerator_t *this)
{
	this->entry = NULL;
	if (this->current)
	{
		table_item_t *current = this->current;

		this->manager->segments[this->segment].count--;
		/* step back so enumerate() advances over the gap correctly */
		this->current = this->prev;

		if (this->prev)
		{
			this->prev->next = current->next;
		}
		else
		{
			this->manager->ike_sa_table[this->row] = current->next;
			unlock_single_segment(this->manager, this->segment);
		}
		free(current);
	}
}
641
/**
 * Find an entry using the provided match function to compare the entries for
 * equality. ike_sa_id is only used to select the hash table row to search.
 */
static status_t get_entry_by_match_function(private_ike_sa_manager_t *this,
					ike_sa_id_t *ike_sa_id, entry_t **entry, u_int *segment,
					linked_list_match_t match, void *param)
{
	table_item_t *item;
	u_int row, seg;

	row = ike_sa_id_hash(ike_sa_id) & this->table_mask;
	seg = row & this->segment_mask;

	lock_single_segment(this, seg);
	item = this->ike_sa_table[row];
	while (item)
	{
		if (match(item->value, param))
		{
			*entry = item->value;
			*segment = seg;
			/* the locked segment has to be unlocked by the caller */
			return SUCCESS;
		}
		item = item->next;
	}
	unlock_single_segment(this, seg);
	return NOT_FOUND;
}
672
/**
 * Find an entry by ike_sa_id_t (also matches SAs whose responder SPI is not
 * yet known, see entry_match_by_id()).
 * Note: On SUCCESS, the caller has to unlock the segment.
 */
static status_t get_entry_by_id(private_ike_sa_manager_t *this,
						ike_sa_id_t *ike_sa_id, entry_t **entry, u_int *segment)
{
	return get_entry_by_match_function(this, ike_sa_id, entry, segment,
				(linked_list_match_t)entry_match_by_id, ike_sa_id);
}
683
/**
 * Find an entry by IKE_SA pointer; ike_sa_id only selects the table row.
 * Note: On SUCCESS, the caller has to unlock the segment.
 */
static status_t get_entry_by_sa(private_ike_sa_manager_t *this,
			ike_sa_id_t *ike_sa_id, ike_sa_t *ike_sa, entry_t **entry, u_int *segment)
{
	return get_entry_by_match_function(this, ike_sa_id, entry, segment,
				(linked_list_match_t)entry_match_by_sa, ike_sa);
}
694
/**
 * Wait until no other thread is using an IKE_SA, return FALSE if entry not
 * acquirable. Called with the entry's segment mutex held; the condvar wait
 * releases it while blocked.
 */
static bool wait_for_entry(private_ike_sa_manager_t *this, entry_t *entry,
						   u_int segment)
{
	if (entry->driveout_new_threads)
	{
		/* we are not allowed to get this */
		return FALSE;
	}
	while (entry->checked_out && !entry->driveout_waiting_threads)
	{
		/* so wait until we can get it for us.
		 * we register us as waiting. */
		entry->waiting_threads++;
		entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
		entry->waiting_threads--;
	}
	/* hm, a deletion request forbids us to get this SA, get next one */
	if (entry->driveout_waiting_threads)
	{
		/* we must signal here, others may be waiting on it, too */
		entry->condvar->signal(entry->condvar);
		return FALSE;
	}
	return TRUE;
}
724
/**
 * Put a half-open SA into the hash table (keyed by the remote address),
 * incrementing the per-peer and global half-open counters.
 */
static void put_half_open(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item;
	u_int row, segment;
	rwlock_t *lock;
	half_open_t *half_open;
	chunk_t addr;

	addr = entry->other->get_address(entry->other);
	row = chunk_hash(addr) & this->table_mask;
	segment = row & this->segment_mask;
	lock = this->half_open_segments[segment].lock;
	lock->write_lock(lock);
	item = this->half_open_table[row];
	while (item)
	{
		half_open = item->value;

		if (chunk_equals(addr, half_open->other))
		{	/* already tracking this peer, just bump the count */
			half_open->count++;
			break;
		}
		item = item->next;
	}

	if (!item)
	{	/* first half-open SA for this peer, create a tracking object */
		INIT(half_open,
			.other = chunk_clone(addr),
			.count = 1,
		);
		INIT(item,
			.value = half_open,
			.next = this->half_open_table[row],
		);
		this->half_open_table[row] = item;
	}
	this->half_open_segments[segment].count++;
	ref_get(&this->half_open_count);
	lock->unlock(lock);
}
770
/**
 * Remove a half-open SA from the hash table, decrementing the per-peer and
 * global counters and dropping the tracking object when it hits zero.
 */
static void remove_half_open(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item, *prev = NULL;
	u_int row, segment;
	rwlock_t *lock;
	chunk_t addr;

	addr = entry->other->get_address(entry->other);
	row = chunk_hash(addr) & this->table_mask;
	segment = row & this->segment_mask;
	lock = this->half_open_segments[segment].lock;
	lock->write_lock(lock);
	item = this->half_open_table[row];
	while (item)
	{
		half_open_t *half_open = item->value;

		if (chunk_equals(addr, half_open->other))
		{
			if (--half_open->count == 0)
			{	/* no more half-open SAs with this peer, unlink and free */
				if (prev)
				{
					prev->next = item->next;
				}
				else
				{
					this->half_open_table[row] = item->next;
				}
				half_open_destroy(half_open);
				free(item);
			}
			this->half_open_segments[segment].count--;
			ignore_result(ref_put(&this->half_open_count));
			break;
		}
		prev = item;
		item = item->next;
	}
	lock->unlock(lock);
}
815
/**
 * Put an SA between two peers into the hash table (keyed by both identities).
 * Inserting the same IKE_SA_ID twice is a no-op.
 */
static void put_connected_peers(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item;
	u_int row, segment;
	rwlock_t *lock;
	connected_peers_t *connected_peers;
	chunk_t my_id, other_id;
	int family;

	my_id = entry->my_id->get_encoding(entry->my_id);
	other_id = entry->other_id->get_encoding(entry->other_id);
	family = entry->other->get_family(entry->other);
	row = chunk_hash_inc(other_id, chunk_hash(my_id)) & this->table_mask;
	segment = row & this->segment_mask;
	lock = this->connected_peers_segments[segment].lock;
	lock->write_lock(lock);
	item = this->connected_peers_table[row];
	while (item)
	{
		connected_peers = item->value;

		if (connected_peers_match(connected_peers, entry->my_id,
								  entry->other_id, family))
		{
			if (connected_peers->sas->find_first(connected_peers->sas,
					(linked_list_match_t)entry->ike_sa_id->equals,
					NULL, entry->ike_sa_id) == SUCCESS)
			{	/* this SA is already in the list, nothing to do */
				lock->unlock(lock);
				return;
			}
			break;
		}
		item = item->next;
	}

	if (!item)
	{	/* first SA between these two identities, create a tracking object */
		INIT(connected_peers,
			.my_id = entry->my_id->clone(entry->my_id),
			.other_id = entry->other_id->clone(entry->other_id),
			.family = family,
			.sas = linked_list_create(),
		);
		INIT(item,
			.value = connected_peers,
			.next = this->connected_peers_table[row],
		);
		this->connected_peers_table[row] = item;
	}
	connected_peers->sas->insert_last(connected_peers->sas,
									  entry->ike_sa_id->clone(entry->ike_sa_id));
	this->connected_peers_segments[segment].count++;
	lock->unlock(lock);
}
874
/**
 * Remove an SA between two peers from the hash table; drops the per-pair
 * tracking object once its SA list becomes empty.
 */
static void remove_connected_peers(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item, *prev = NULL;
	u_int row, segment;
	rwlock_t *lock;
	chunk_t my_id, other_id;
	int family;

	my_id = entry->my_id->get_encoding(entry->my_id);
	other_id = entry->other_id->get_encoding(entry->other_id);
	family = entry->other->get_family(entry->other);

	row = chunk_hash_inc(other_id, chunk_hash(my_id)) & this->table_mask;
	segment = row & this->segment_mask;

	lock = this->connected_peers_segments[segment].lock;
	lock->write_lock(lock);
	item = this->connected_peers_table[row];
	while (item)
	{
		connected_peers_t *current = item->value;

		if (connected_peers_match(current, entry->my_id, entry->other_id,
								  family))
		{
			enumerator_t *enumerator;
			ike_sa_id_t *ike_sa_id;

			/* find and destroy the cloned ID stored for this SA */
			enumerator = current->sas->create_enumerator(current->sas);
			while (enumerator->enumerate(enumerator, &ike_sa_id))
			{
				if (ike_sa_id->equals(ike_sa_id, entry->ike_sa_id))
				{
					current->sas->remove_at(current->sas, enumerator);
					ike_sa_id->destroy(ike_sa_id);
					this->connected_peers_segments[segment].count--;
					break;
				}
			}
			enumerator->destroy(enumerator);
			if (current->sas->get_count(current->sas) == 0)
			{	/* no more SAs between these peers, unlink and free */
				if (prev)
				{
					prev->next = item->next;
				}
				else
				{
					this->connected_peers_table[row] = item->next;
				}
				connected_peers_destroy(current);
				free(item);
			}
			break;
		}
		prev = item;
		item = item->next;
	}
	lock->unlock(lock);
}
938
939 /**
940 * Get a random SPI for new IKE_SAs
941 */
942 static u_int64_t get_spi(private_ike_sa_manager_t *this)
943 {
944 u_int64_t spi;
945
946 if (this->rng &&
947 this->rng->get_bytes(this->rng, sizeof(spi), (u_int8_t*)&spi))
948 {
949 return spi;
950 }
951 return 0;
952 }
953
/**
 * Calculate the hash of the initial IKE message. Memory for the hash is
 * allocated on success.
 *
 * @param hasher	hasher to use (not destroyed here)
 * @param message	initial IKE message to hash
 * @param hash		receives the allocated hash on success
 * @returns TRUE on success
 */
static bool get_init_hash(hasher_t *hasher, message_t *message, chunk_t *hash)
{
	host_t *src;

	if (message->get_first_payload_type(message) == PLV1_FRAGMENT)
	{	/* only hash the source IP, port and SPI for fragmented init messages */
		u_int16_t port;
		u_int64_t spi;

		src = message->get_source(message);
		/* NULL hash argument appends to the hasher state without finalizing */
		if (!hasher->allocate_hash(hasher, src->get_address(src), NULL))
		{
			return FALSE;
		}
		port = src->get_port(src);
		if (!hasher->allocate_hash(hasher, chunk_from_thing(port), NULL))
		{
			return FALSE;
		}
		spi = message->get_initiator_spi(message);
		return hasher->allocate_hash(hasher, chunk_from_thing(spi), hash);
	}
	if (message->get_exchange_type(message) == ID_PROT)
	{	/* include the source for Main Mode as the hash will be the same if
		 * SPIs are reused by two initiators that use the same proposal */
		src = message->get_source(message);

		if (!hasher->allocate_hash(hasher, src->get_address(src), NULL))
		{
			return FALSE;
		}
	}
	return hasher->allocate_hash(hasher, message->get_packet_data(message), hash);
}
994
995 /**
996 * Check if we already have created an IKE_SA based on the initial IKE message
997 * with the given hash.
998 * If not the hash is stored, the hash data is not(!) cloned.
999 *
1000 * Also, the local SPI is returned. In case of a retransmit this is already
1001 * stored together with the hash, otherwise it is newly allocated and should
1002 * be used to create the IKE_SA.
1003 *
1004 * @returns ALREADY_DONE if the message with the given hash has been seen before
1005 * NOT_FOUND if the message hash was not found
1006 * FAILED if the SPI allocation failed
1007 */
1008 static status_t check_and_put_init_hash(private_ike_sa_manager_t *this,
1009 chunk_t init_hash, u_int64_t *our_spi)
1010 {
1011 table_item_t *item;
1012 u_int row, segment;
1013 mutex_t *mutex;
1014 init_hash_t *init;
1015 u_int64_t spi;
1016
1017 row = chunk_hash(init_hash) & this->table_mask;
1018 segment = row & this->segment_mask;
1019 mutex = this->init_hashes_segments[segment].mutex;
1020 mutex->lock(mutex);
1021 item = this->init_hashes_table[row];
1022 while (item)
1023 {
1024 init_hash_t *current = item->value;
1025
1026 if (chunk_equals(init_hash, current->hash))
1027 {
1028 *our_spi = current->our_spi;
1029 mutex->unlock(mutex);
1030 return ALREADY_DONE;
1031 }
1032 item = item->next;
1033 }
1034
1035 spi = get_spi(this);
1036 if (!spi)
1037 {
1038 return FAILED;
1039 }
1040
1041 INIT(init,
1042 .hash = {
1043 .len = init_hash.len,
1044 .ptr = init_hash.ptr,
1045 },
1046 .our_spi = spi,
1047 );
1048 INIT(item,
1049 .value = init,
1050 .next = this->init_hashes_table[row],
1051 );
1052 this->init_hashes_table[row] = item;
1053 *our_spi = init->our_spi;
1054 mutex->unlock(mutex);
1055 return NOT_FOUND;
1056 }
1057
/**
 * Remove the hash of an initial IKE message from the cache.
 * The stored hash data is not freed here (it was never cloned, see
 * check_and_put_init_hash()); only the wrapper structs are.
 */
static void remove_init_hash(private_ike_sa_manager_t *this, chunk_t init_hash)
{
	table_item_t *item, *prev = NULL;
	u_int row, segment;
	mutex_t *mutex;

	row = chunk_hash(init_hash) & this->table_mask;
	segment = row & this->segment_mask;
	mutex = this->init_hashes_segments[segment].mutex;
	mutex->lock(mutex);
	item = this->init_hashes_table[row];
	while (item)
	{
		init_hash_t *current = item->value;

		if (chunk_equals(init_hash, current->hash))
		{
			if (prev)
			{
				prev->next = item->next;
			}
			else
			{
				this->init_hashes_table[row] = item->next;
			}
			free(current);
			free(item);
			break;
		}
		prev = item;
		item = item->next;
	}
	mutex->unlock(mutex);
}
1095
METHOD(ike_sa_manager_t, checkout, ike_sa_t*,
	private_ike_sa_manager_t *this, ike_sa_id_t *ike_sa_id)
{
	ike_sa_t *ike_sa = NULL;
	entry_t *entry;
	u_int segment;

	DBG2(DBG_MGR, "checkout IKE_SA");

	if (get_entry_by_id(this, ike_sa_id, &entry, &segment) == SUCCESS)
	{
		/* may block until the SA is released by another thread */
		if (wait_for_entry(this, entry, segment))
		{
			entry->checked_out = TRUE;
			ike_sa = entry->ike_sa;
			DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
					ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
		}
		unlock_single_segment(this, segment);
	}
	/* register the SA (or NULL) as the current SA on the bus */
	charon->bus->set_sa(charon->bus, ike_sa);
	return ike_sa;
}
1119
METHOD(ike_sa_manager_t, checkout_new, ike_sa_t*,
	private_ike_sa_manager_t* this, ike_version_t version, bool initiator)
{
	ike_sa_id_t *ike_sa_id;
	ike_sa_t *ike_sa;
	u_int8_t ike_version;
	u_int64_t spi;

	ike_version = version == IKEV1 ? IKEV1_MAJOR_VERSION : IKEV2_MAJOR_VERSION;

	spi = get_spi(this);
	if (!spi)
	{
		DBG1(DBG_MGR, "failed to allocate SPI for new IKE_SA");
		return NULL;
	}

	/* as initiator our SPI is the initiator SPI, as responder the responder
	 * SPI; the peer's SPI is not yet known and stays 0 */
	if (initiator)
	{
		ike_sa_id = ike_sa_id_create(ike_version, spi, 0, TRUE);
	}
	else
	{
		ike_sa_id = ike_sa_id_create(ike_version, 0, spi, FALSE);
	}
	ike_sa = ike_sa_create(ike_sa_id, initiator, version);
	ike_sa_id->destroy(ike_sa_id);

	/* note: the new SA is NOT registered in the manager here, the caller
	 * checks it in later */
	if (ike_sa)
	{
		DBG2(DBG_MGR, "created IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
				ike_sa->get_unique_id(ike_sa));
	}
	return ike_sa;
}
1155
1156 /**
1157 * Get the message ID or message hash to detect early retransmissions
1158 */
1159 static u_int32_t get_message_id_or_hash(message_t *message)
1160 {
1161 /* Use the message ID, or the message hash in IKEv1 Main/Aggressive mode */
1162 if (message->get_major_version(message) == IKEV1_MAJOR_VERSION &&
1163 message->get_message_id(message) == 0)
1164 {
1165 return chunk_hash(message->get_packet_data(message));
1166 }
1167 return message->get_message_id(message);
1168 }
1169
1170 METHOD(ike_sa_manager_t, checkout_by_message, ike_sa_t*,
1171 private_ike_sa_manager_t* this, message_t *message)
1172 {
1173 u_int segment;
1174 entry_t *entry;
1175 ike_sa_t *ike_sa = NULL;
1176 ike_sa_id_t *id;
1177 ike_version_t ike_version;
1178 bool is_init = FALSE;
1179
1180 id = message->get_ike_sa_id(message);
1181 /* clone the IKE_SA ID so we can modify the initiator flag */
1182 id = id->clone(id);
1183 id->switch_initiator(id);
1184
1185 DBG2(DBG_MGR, "checkout IKE_SA by message");
1186
1187 if (id->get_responder_spi(id) == 0)
1188 {
1189 if (message->get_major_version(message) == IKEV2_MAJOR_VERSION)
1190 {
1191 if (message->get_exchange_type(message) == IKE_SA_INIT &&
1192 message->get_request(message))
1193 {
1194 ike_version = IKEV2;
1195 is_init = TRUE;
1196 }
1197 }
1198 else
1199 {
1200 if (message->get_exchange_type(message) == ID_PROT ||
1201 message->get_exchange_type(message) == AGGRESSIVE)
1202 {
1203 ike_version = IKEV1;
1204 is_init = TRUE;
1205 if (id->is_initiator(id))
1206 { /* not set in IKEv1, switch back before applying to new SA */
1207 id->switch_initiator(id);
1208 }
1209 }
1210 }
1211 }
1212
1213 if (is_init)
1214 {
1215 hasher_t *hasher;
1216 u_int64_t our_spi;
1217 chunk_t hash;
1218
1219 hasher = lib->crypto->create_hasher(lib->crypto, HASH_SHA1);
1220 if (!hasher || !get_init_hash(hasher, message, &hash))
1221 {
1222 DBG1(DBG_MGR, "ignoring message, failed to hash message");
1223 DESTROY_IF(hasher);
1224 id->destroy(id);
1225 return NULL;
1226 }
1227 hasher->destroy(hasher);
1228
1229 /* ensure this is not a retransmit of an already handled init message */
1230 switch (check_and_put_init_hash(this, hash, &our_spi))
1231 {
1232 case NOT_FOUND:
1233 { /* we've not seen this packet yet, create a new IKE_SA */
1234 if (!this->ikesa_limit ||
1235 this->public.get_count(&this->public) < this->ikesa_limit)
1236 {
1237 id->set_responder_spi(id, our_spi);
1238 ike_sa = ike_sa_create(id, FALSE, ike_version);
1239 if (ike_sa)
1240 {
1241 entry = entry_create();
1242 entry->ike_sa = ike_sa;
1243 entry->ike_sa_id = id;
1244
1245 segment = put_entry(this, entry);
1246 entry->checked_out = TRUE;
1247 unlock_single_segment(this, segment);
1248
1249 entry->processing = get_message_id_or_hash(message);
1250 entry->init_hash = hash;
1251
1252 DBG2(DBG_MGR, "created IKE_SA %s[%u]",
1253 ike_sa->get_name(ike_sa),
1254 ike_sa->get_unique_id(ike_sa));
1255
1256 charon->bus->set_sa(charon->bus, ike_sa);
1257 return ike_sa;
1258 }
1259 else
1260 {
1261 DBG1(DBG_MGR, "creating IKE_SA failed, ignoring message");
1262 }
1263 }
1264 else
1265 {
1266 DBG1(DBG_MGR, "ignoring %N, hitting IKE_SA limit (%u)",
1267 exchange_type_names, message->get_exchange_type(message),
1268 this->ikesa_limit);
1269 }
1270 remove_init_hash(this, hash);
1271 chunk_free(&hash);
1272 id->destroy(id);
1273 return NULL;
1274 }
1275 case FAILED:
1276 { /* we failed to allocate an SPI */
1277 chunk_free(&hash);
1278 id->destroy(id);
1279 DBG1(DBG_MGR, "ignoring message, failed to allocate SPI");
1280 return NULL;
1281 }
1282 case ALREADY_DONE:
1283 default:
1284 break;
1285 }
1286 /* it looks like we already handled this init message to some degree */
1287 id->set_responder_spi(id, our_spi);
1288 chunk_free(&hash);
1289 }
1290
1291 if (get_entry_by_id(this, id, &entry, &segment) == SUCCESS)
1292 {
1293 /* only check out if we are not already processing it. */
1294 if (entry->processing == get_message_id_or_hash(message))
1295 {
1296 DBG1(DBG_MGR, "ignoring request with ID %u, already processing",
1297 entry->processing);
1298 }
1299 else if (wait_for_entry(this, entry, segment))
1300 {
1301 ike_sa_id_t *ike_id;
1302
1303 ike_id = entry->ike_sa->get_id(entry->ike_sa);
1304 entry->checked_out = TRUE;
1305 if (message->get_first_payload_type(message) != PLV1_FRAGMENT &&
1306 message->get_first_payload_type(message) != PLV2_FRAGMENT)
1307 { /* TODO-FRAG: this fails if there are unencrypted payloads */
1308 entry->processing = get_message_id_or_hash(message);
1309 }
1310 if (ike_id->get_responder_spi(ike_id) == 0)
1311 {
1312 ike_id->set_responder_spi(ike_id, id->get_responder_spi(id));
1313 }
1314 ike_sa = entry->ike_sa;
1315 DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
1316 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
1317 }
1318 unlock_single_segment(this, segment);
1319 }
1320 else
1321 {
1322 charon->bus->alert(charon->bus, ALERT_INVALID_IKE_SPI, message);
1323 }
1324 id->destroy(id);
1325 charon->bus->set_sa(charon->bus, ike_sa);
1326 return ike_sa;
1327 }
1328
1329 METHOD(ike_sa_manager_t, checkout_by_config, ike_sa_t*,
1330 private_ike_sa_manager_t *this, peer_cfg_t *peer_cfg)
1331 {
1332 enumerator_t *enumerator;
1333 entry_t *entry;
1334 ike_sa_t *ike_sa = NULL;
1335 peer_cfg_t *current_peer;
1336 ike_cfg_t *current_ike;
1337 u_int segment;
1338
1339 DBG2(DBG_MGR, "checkout IKE_SA by config");
1340
1341 if (!this->reuse_ikesa)
1342 { /* IKE_SA reuse disable by config */
1343 ike_sa = checkout_new(this, peer_cfg->get_ike_version(peer_cfg), TRUE);
1344 charon->bus->set_sa(charon->bus, ike_sa);
1345 return ike_sa;
1346 }
1347
1348 enumerator = create_table_enumerator(this);
1349 while (enumerator->enumerate(enumerator, &entry, &segment))
1350 {
1351 if (!wait_for_entry(this, entry, segment))
1352 {
1353 continue;
1354 }
1355 if (entry->ike_sa->get_state(entry->ike_sa) == IKE_DELETING)
1356 { /* skip IKE_SAs which are not usable */
1357 continue;
1358 }
1359
1360 current_peer = entry->ike_sa->get_peer_cfg(entry->ike_sa);
1361 if (current_peer && current_peer->equals(current_peer, peer_cfg))
1362 {
1363 current_ike = current_peer->get_ike_cfg(current_peer);
1364 if (current_ike->equals(current_ike, peer_cfg->get_ike_cfg(peer_cfg)))
1365 {
1366 entry->checked_out = TRUE;
1367 ike_sa = entry->ike_sa;
1368 DBG2(DBG_MGR, "found existing IKE_SA %u with a '%s' config",
1369 ike_sa->get_unique_id(ike_sa),
1370 current_peer->get_name(current_peer));
1371 break;
1372 }
1373 }
1374 }
1375 enumerator->destroy(enumerator);
1376
1377 if (!ike_sa)
1378 { /* no IKE_SA using such a config, hand out a new */
1379 ike_sa = checkout_new(this, peer_cfg->get_ike_version(peer_cfg), TRUE);
1380 }
1381 charon->bus->set_sa(charon->bus, ike_sa);
1382 return ike_sa;
1383 }
1384
1385 METHOD(ike_sa_manager_t, checkout_by_id, ike_sa_t*,
1386 private_ike_sa_manager_t *this, u_int32_t id)
1387 {
1388 enumerator_t *enumerator;
1389 entry_t *entry;
1390 ike_sa_t *ike_sa = NULL;
1391 u_int segment;
1392
1393 DBG2(DBG_MGR, "checkout IKE_SA by ID %u", id);
1394
1395 enumerator = create_table_enumerator(this);
1396 while (enumerator->enumerate(enumerator, &entry, &segment))
1397 {
1398 if (wait_for_entry(this, entry, segment))
1399 {
1400 if (entry->ike_sa->get_unique_id(entry->ike_sa) == id)
1401 {
1402 ike_sa = entry->ike_sa;
1403 entry->checked_out = TRUE;
1404 break;
1405 }
1406 }
1407 }
1408 enumerator->destroy(enumerator);
1409
1410 if (ike_sa)
1411 {
1412 DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
1413 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
1414 }
1415 charon->bus->set_sa(charon->bus, ike_sa);
1416 return ike_sa;
1417 }
1418
1419 METHOD(ike_sa_manager_t, checkout_by_name, ike_sa_t*,
1420 private_ike_sa_manager_t *this, char *name, bool child)
1421 {
1422 enumerator_t *enumerator, *children;
1423 entry_t *entry;
1424 ike_sa_t *ike_sa = NULL;
1425 child_sa_t *child_sa;
1426 u_int segment;
1427
1428 enumerator = create_table_enumerator(this);
1429 while (enumerator->enumerate(enumerator, &entry, &segment))
1430 {
1431 if (wait_for_entry(this, entry, segment))
1432 {
1433 /* look for a child with such a policy name ... */
1434 if (child)
1435 {
1436 children = entry->ike_sa->create_child_sa_enumerator(entry->ike_sa);
1437 while (children->enumerate(children, (void**)&child_sa))
1438 {
1439 if (streq(child_sa->get_name(child_sa), name))
1440 {
1441 ike_sa = entry->ike_sa;
1442 break;
1443 }
1444 }
1445 children->destroy(children);
1446 }
1447 else /* ... or for a IKE_SA with such a connection name */
1448 {
1449 if (streq(entry->ike_sa->get_name(entry->ike_sa), name))
1450 {
1451 ike_sa = entry->ike_sa;
1452 }
1453 }
1454 /* got one, return */
1455 if (ike_sa)
1456 {
1457 entry->checked_out = TRUE;
1458 DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
1459 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
1460 break;
1461 }
1462 }
1463 }
1464 enumerator->destroy(enumerator);
1465
1466 charon->bus->set_sa(charon->bus, ike_sa);
1467 return ike_sa;
1468 }
1469
1470 /**
1471 * enumerator filter function, waiting variant
1472 */
1473 static bool enumerator_filter_wait(private_ike_sa_manager_t *this,
1474 entry_t **in, ike_sa_t **out, u_int *segment)
1475 {
1476 if (wait_for_entry(this, *in, *segment))
1477 {
1478 *out = (*in)->ike_sa;
1479 charon->bus->set_sa(charon->bus, *out);
1480 return TRUE;
1481 }
1482 return FALSE;
1483 }
1484
1485 /**
1486 * enumerator filter function, skipping variant
1487 */
1488 static bool enumerator_filter_skip(private_ike_sa_manager_t *this,
1489 entry_t **in, ike_sa_t **out, u_int *segment)
1490 {
1491 if (!(*in)->driveout_new_threads &&
1492 !(*in)->driveout_waiting_threads &&
1493 !(*in)->checked_out)
1494 {
1495 *out = (*in)->ike_sa;
1496 charon->bus->set_sa(charon->bus, *out);
1497 return TRUE;
1498 }
1499 return FALSE;
1500 }
1501
/**
 * Reset threads SA after enumeration
 */
static void reset_sa(void *data)
{
	/* clear the thread-local IKE_SA the filter functions set on the bus;
	 * "data" is unused, this is just an enumerator cleanup callback */
	charon->bus->set_sa(charon->bus, NULL);
}
1509
METHOD(ike_sa_manager_t, create_enumerator, enumerator_t*,
	private_ike_sa_manager_t* this, bool wait)
{
	/* "wait" selects between the blocking filter (waits for busy entries)
	 * and the skipping filter; reset_sa() clears the bus SA on destroy */
	return enumerator_create_filter(create_table_enumerator(this),
		wait ? (void*)enumerator_filter_wait : (void*)enumerator_filter_skip,
		this, reset_sa);
}
1517
METHOD(ike_sa_manager_t, checkin, void,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
{
	/* to check the SA back in, we look for the pointer of the ike_sa
	 * in all entries.
	 * The lookup is done by initiator SPI, so even if the SPI has changed (e.g.
	 * on reception of a IKE_SA_INIT response) the lookup will work but
	 * updating of the SPI MAY be necessary...
	 */
	entry_t *entry;
	ike_sa_id_t *ike_sa_id;
	host_t *other;
	identification_t *my_id, *other_id;
	u_int segment;

	ike_sa_id = ike_sa->get_id(ike_sa);
	my_id = ike_sa->get_my_id(ike_sa);
	other_id = ike_sa->get_other_eap_id(ike_sa);
	other = ike_sa->get_other_host(ike_sa);

	DBG2(DBG_MGR, "checkin IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
		 ike_sa->get_unique_id(ike_sa));

	/* look for the entry */
	if (get_entry_by_sa(this, ike_sa_id, ike_sa, &entry, &segment) == SUCCESS)
	{
		/* ike_sa_id must be updated */
		entry->ike_sa_id->replace_values(entry->ike_sa_id, ike_sa->get_id(ike_sa));
		/* signal waiting threads */
		entry->checked_out = FALSE;
		/* -1 marks "no message currently being processed" for this entry */
		entry->processing = -1;
		/* check if this SA is half-open */
		if (entry->half_open && ike_sa->get_state(ike_sa) != IKE_CONNECTING)
		{
			/* not half open anymore */
			entry->half_open = FALSE;
			remove_half_open(this, entry);
		}
		else if (entry->half_open && !other->ip_equals(other, entry->other))
		{
			/* the other host's IP has changed, we must update the hash table */
			remove_half_open(this, entry);
			DESTROY_IF(entry->other);
			entry->other = other->clone(other);
			put_half_open(this, entry);
		}
		else if (!entry->half_open &&
				 !entry->ike_sa_id->is_initiator(entry->ike_sa_id) &&
				 ike_sa->get_state(ike_sa) == IKE_CONNECTING)
		{
			/* this is a new half-open SA */
			entry->half_open = TRUE;
			entry->other = other->clone(other);
			put_half_open(this, entry);
		}
		DBG2(DBG_MGR, "check-in of IKE_SA successful.");
		entry->condvar->signal(entry->condvar);
	}
	else
	{
		/* not found in the table yet, register it as a new entry */
		entry = entry_create();
		entry->ike_sa_id = ike_sa_id->clone(ike_sa_id);
		entry->ike_sa = ike_sa;
		segment = put_entry(this, entry);
	}

	/* apply identities for duplicate test */
	if ((ike_sa->get_state(ike_sa) == IKE_ESTABLISHED ||
		 ike_sa->get_state(ike_sa) == IKE_PASSIVE) &&
		entry->my_id == NULL && entry->other_id == NULL)
	{
		if (ike_sa->get_version(ike_sa) == IKEV1)
		{
			/* If authenticated and received INITIAL_CONTACT,
			 * delete any existing IKE_SAs with that peer. */
			if (ike_sa->has_condition(ike_sa, COND_INIT_CONTACT_SEEN))
			{
				this->public.check_uniqueness(&this->public, ike_sa, TRUE);
				ike_sa->set_condition(ike_sa, COND_INIT_CONTACT_SEEN, FALSE);
			}
		}

		/* register the identity pair once so later SAs with the same peer
		 * can be found by check_uniqueness()/has_contact() */
		entry->my_id = my_id->clone(my_id);
		entry->other_id = other_id->clone(other_id);
		if (!entry->other)
		{
			entry->other = other->clone(other);
		}
		put_connected_peers(this, entry);
	}

	/* get_entry_by_sa()/put_entry() left the segment locked until here */
	unlock_single_segment(this, segment);

	charon->bus->set_sa(charon->bus, NULL);
}
1613
METHOD(ike_sa_manager_t, checkin_and_destroy, void,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
{
	/* deletion is a bit complex, we must ensure that no thread is waiting for
	 * this SA.
	 * We take this SA from the table, and start signaling while threads
	 * are in the condvar.
	 */
	entry_t *entry;
	ike_sa_id_t *ike_sa_id;
	u_int segment;

	ike_sa_id = ike_sa->get_id(ike_sa);

	DBG2(DBG_MGR, "checkin and destroy IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
		 ike_sa->get_unique_id(ike_sa));

	if (get_entry_by_sa(this, ike_sa_id, ike_sa, &entry, &segment) == SUCCESS)
	{
		if (entry->driveout_waiting_threads && entry->driveout_new_threads)
		{	/* it looks like flush() has been called and the SA is being deleted
			 * anyway, just check it in */
			DBG2(DBG_MGR, "ignored check-in and destroy of IKE_SA during shutdown");
			entry->checked_out = FALSE;
			entry->condvar->broadcast(entry->condvar);
			unlock_single_segment(this, segment);
			return;
		}

		/* drive out waiting threads, as we are in hurry */
		entry->driveout_waiting_threads = TRUE;
		/* mark it, so no new threads can get this entry */
		entry->driveout_new_threads = TRUE;
		/* wait until all workers have done their work */
		while (entry->waiting_threads)
		{
			/* wake up all */
			entry->condvar->broadcast(entry->condvar);
			/* they will wake us again when their work is done */
			entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
		}
		/* no threads reference the entry anymore, safe to unlink it */
		remove_entry(this, entry);
		unlock_single_segment(this, segment);

		/* purge any references the auxiliary tables still hold */
		if (entry->half_open)
		{
			remove_half_open(this, entry);
		}
		if (entry->my_id && entry->other_id)
		{
			remove_connected_peers(this, entry);
		}
		if (entry->init_hash.ptr)
		{
			remove_init_hash(this, entry->init_hash);
		}

		entry_destroy(entry);

		DBG2(DBG_MGR, "check-in and destroy of IKE_SA successful");
	}
	else
	{
		DBG1(DBG_MGR, "tried to check-in and delete nonexisting IKE_SA");
		ike_sa->destroy(ike_sa);
	}
	charon->bus->set_sa(charon->bus, NULL);
}
1682
/**
 * Cleanup function for create_id_enumerator
 */
static void id_enumerator_cleanup(linked_list_t *ids)
{
	/* destroy the cloned list and every contained ike_sa_id_t */
	ids->destroy_offset(ids, offsetof(ike_sa_id_t, destroy));
}
1690
1691 METHOD(ike_sa_manager_t, create_id_enumerator, enumerator_t*,
1692 private_ike_sa_manager_t *this, identification_t *me,
1693 identification_t *other, int family)
1694 {
1695 table_item_t *item;
1696 u_int row, segment;
1697 rwlock_t *lock;
1698 linked_list_t *ids = NULL;
1699
1700 row = chunk_hash_inc(other->get_encoding(other),
1701 chunk_hash(me->get_encoding(me))) & this->table_mask;
1702 segment = row & this->segment_mask;
1703
1704 lock = this->connected_peers_segments[segment].lock;
1705 lock->read_lock(lock);
1706 item = this->connected_peers_table[row];
1707 while (item)
1708 {
1709 connected_peers_t *current = item->value;
1710
1711 if (connected_peers_match(current, me, other, family))
1712 {
1713 ids = current->sas->clone_offset(current->sas,
1714 offsetof(ike_sa_id_t, clone));
1715 break;
1716 }
1717 item = item->next;
1718 }
1719 lock->unlock(lock);
1720
1721 if (!ids)
1722 {
1723 return enumerator_create_empty();
1724 }
1725 return enumerator_create_cleaner(ids->create_enumerator(ids),
1726 (void*)id_enumerator_cleanup, ids);
1727 }
1728
1729 /**
1730 * Move all CHILD_SAs from old to new
1731 */
1732 static void adopt_children(ike_sa_t *old, ike_sa_t *new)
1733 {
1734 enumerator_t *enumerator;
1735 child_sa_t *child_sa;
1736
1737 enumerator = old->create_child_sa_enumerator(old);
1738 while (enumerator->enumerate(enumerator, &child_sa))
1739 {
1740 old->remove_child_sa(old, enumerator);
1741 new->add_child_sa(new, child_sa);
1742 }
1743 enumerator->destroy(enumerator);
1744 }
1745
1746 /**
1747 * Check if the replaced IKE_SA might get reauthenticated from host
1748 */
1749 static bool is_ikev1_reauth(ike_sa_t *duplicate, host_t *host)
1750 {
1751 return duplicate->get_version(duplicate) == IKEV1 &&
1752 host->equals(host, duplicate->get_other_host(duplicate));
1753 }
1754
/**
 * Delete an existing IKE_SA due to a unique replace policy
 *
 * The returned status is propagated by the caller (check_uniqueness()),
 * where DESTROY_ME causes the duplicate to be destroyed on check-in.
 */
static status_t enforce_replace(private_ike_sa_manager_t *this,
								ike_sa_t *duplicate, ike_sa_t *new,
								identification_t *other, host_t *host)
{
	charon->bus->alert(charon->bus, ALERT_UNIQUE_REPLACE);

	if (is_ikev1_reauth(duplicate, host))
	{
		/* looks like a reauthentication attempt */
		if (!new->has_condition(new, COND_INIT_CONTACT_SEEN))
		{
			/* keep established CHILD_SAs alive by moving them over */
			adopt_children(duplicate, new);
		}
		/* For IKEv1 we have to delay the delete for the old IKE_SA. Some
		 * peers need to complete the new SA first, otherwise the quick modes
		 * might get lost. */
		lib->scheduler->schedule_job(lib->scheduler, (job_t*)
			delete_ike_sa_job_create(duplicate->get_id(duplicate), TRUE), 10);
		return SUCCESS;
	}
	DBG1(DBG_IKE, "deleting duplicate IKE_SA for peer '%Y' due to "
		 "uniqueness policy", other);
	return duplicate->delete(duplicate);
}
1782
METHOD(ike_sa_manager_t, check_uniqueness, bool,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa, bool force_replace)
{
	bool cancel = FALSE;
	peer_cfg_t *peer_cfg;
	unique_policy_t policy;
	enumerator_t *enumerator;
	ike_sa_id_t *id = NULL;
	identification_t *me, *other;
	host_t *other_host;

	peer_cfg = ike_sa->get_peer_cfg(ike_sa);
	policy = peer_cfg->get_unique_policy(peer_cfg);
	if (policy == UNIQUE_NEVER || (policy == UNIQUE_NO && !force_replace))
	{	/* uniqueness not enforced for this config */
		return FALSE;
	}
	me = ike_sa->get_my_id(ike_sa);
	other = ike_sa->get_other_eap_id(ike_sa);
	other_host = ike_sa->get_other_host(ike_sa);

	/* walk over all IKE_SAs registered with the same identity pair */
	enumerator = create_id_enumerator(this, me, other,
									  other_host->get_family(other_host));
	while (enumerator->enumerate(enumerator, &id))
	{
		status_t status = SUCCESS;
		ike_sa_t *duplicate;

		duplicate = checkout(this, id);
		if (!duplicate)
		{
			continue;
		}
		if (force_replace)
		{	/* INITIAL_CONTACT received: destroy duplicates unconditionally */
			DBG1(DBG_IKE, "destroying duplicate IKE_SA for peer '%Y', "
				 "received INITIAL_CONTACT", other);
			charon->bus->ike_updown(charon->bus, duplicate, FALSE);
			checkin_and_destroy(this, duplicate);
			continue;
		}
		/* only treat SAs with an equal config as duplicates */
		peer_cfg = duplicate->get_peer_cfg(duplicate);
		if (peer_cfg && peer_cfg->equals(peer_cfg, ike_sa->get_peer_cfg(ike_sa)))
		{
			switch (duplicate->get_state(duplicate))
			{
				case IKE_ESTABLISHED:
				case IKE_REKEYING:
					switch (policy)
					{
						case UNIQUE_REPLACE:
							status = enforce_replace(this, duplicate, ike_sa,
													 other, other_host);
							break;
						case UNIQUE_KEEP:
							/* IKEv1 reauthentication attempts are tolerated */
							if (!is_ikev1_reauth(duplicate, other_host))
							{
								cancel = TRUE;
								/* we keep the first IKE_SA and delete all
								 * other duplicates that might exist */
								policy = UNIQUE_REPLACE;
							}
							break;
						default:
							break;
					}
					break;
				default:
					break;
			}
		}
		/* every checkout above must be balanced by a check-in */
		if (status == DESTROY_ME)
		{
			checkin_and_destroy(this, duplicate);
		}
		else
		{
			checkin(this, duplicate);
		}
	}
	enumerator->destroy(enumerator);
	/* reset thread's current IKE_SA after checkin */
	charon->bus->set_sa(charon->bus, ike_sa);
	return cancel;
}
1868
1869 METHOD(ike_sa_manager_t, has_contact, bool,
1870 private_ike_sa_manager_t *this, identification_t *me,
1871 identification_t *other, int family)
1872 {
1873 table_item_t *item;
1874 u_int row, segment;
1875 rwlock_t *lock;
1876 bool found = FALSE;
1877
1878 row = chunk_hash_inc(other->get_encoding(other),
1879 chunk_hash(me->get_encoding(me))) & this->table_mask;
1880 segment = row & this->segment_mask;
1881 lock = this->connected_peers_segments[segment].lock;
1882 lock->read_lock(lock);
1883 item = this->connected_peers_table[row];
1884 while (item)
1885 {
1886 if (connected_peers_match(item->value, me, other, family))
1887 {
1888 found = TRUE;
1889 break;
1890 }
1891 item = item->next;
1892 }
1893 lock->unlock(lock);
1894
1895 return found;
1896 }
1897
1898 METHOD(ike_sa_manager_t, get_count, u_int,
1899 private_ike_sa_manager_t *this)
1900 {
1901 u_int segment, count = 0;
1902 mutex_t *mutex;
1903
1904 for (segment = 0; segment < this->segment_count; segment++)
1905 {
1906 mutex = this->segments[segment & this->segment_mask].mutex;
1907 mutex->lock(mutex);
1908 count += this->segments[segment].count;
1909 mutex->unlock(mutex);
1910 }
1911 return count;
1912 }
1913
1914 METHOD(ike_sa_manager_t, get_half_open_count, u_int,
1915 private_ike_sa_manager_t *this, host_t *ip)
1916 {
1917 table_item_t *item;
1918 u_int row, segment;
1919 rwlock_t *lock;
1920 chunk_t addr;
1921 u_int count = 0;
1922
1923 if (ip)
1924 {
1925 addr = ip->get_address(ip);
1926 row = chunk_hash(addr) & this->table_mask;
1927 segment = row & this->segment_mask;
1928 lock = this->half_open_segments[segment].lock;
1929 lock->read_lock(lock);
1930 item = this->half_open_table[row];
1931 while (item)
1932 {
1933 half_open_t *half_open = item->value;
1934
1935 if (chunk_equals(addr, half_open->other))
1936 {
1937 count = half_open->count;
1938 break;
1939 }
1940 item = item->next;
1941 }
1942 lock->unlock(lock);
1943 }
1944 else
1945 {
1946 count = (u_int)ref_cur(&this->half_open_count);
1947 }
1948 return count;
1949 }
1950
METHOD(ike_sa_manager_t, flush, void,
	private_ike_sa_manager_t *this)
{
	/* destroy all list entries */
	enumerator_t *enumerator;
	entry_t *entry;
	u_int segment;

	/* all four phases below run with every segment locked */
	lock_all_segments(this);
	DBG2(DBG_MGR, "going to destroy IKE_SA manager and all managed IKE_SA's");
	/* Step 1: drive out all waiting threads */
	DBG2(DBG_MGR, "set driveout flags for all stored IKE_SA's");
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		/* do not accept new threads, drive out waiting threads */
		entry->driveout_new_threads = TRUE;
		entry->driveout_waiting_threads = TRUE;
	}
	enumerator->destroy(enumerator);
	DBG2(DBG_MGR, "wait for all threads to leave IKE_SA's");
	/* Step 2: wait until all are gone */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		while (entry->waiting_threads || entry->checked_out)
		{
			/* wake up all */
			entry->condvar->broadcast(entry->condvar);
			/* go sleeping until they are gone */
			entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
		}
	}
	enumerator->destroy(enumerator);
	DBG2(DBG_MGR, "delete all IKE_SA's");
	/* Step 3: initiate deletion of all IKE_SAs */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		charon->bus->set_sa(charon->bus, entry->ike_sa);
		if (entry->ike_sa->get_version(entry->ike_sa) == IKEV2)
		{	/* as the delete never gets processed, fire down events */
			switch (entry->ike_sa->get_state(entry->ike_sa))
			{
				case IKE_ESTABLISHED:
				case IKE_REKEYING:
				case IKE_DELETING:
					charon->bus->ike_updown(charon->bus, entry->ike_sa, FALSE);
					break;
				default:
					break;
			}
		}
		entry->ike_sa->delete(entry->ike_sa);
	}
	enumerator->destroy(enumerator);

	DBG2(DBG_MGR, "destroy all entries");
	/* Step 4: destroy all entries */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		charon->bus->set_sa(charon->bus, entry->ike_sa);
		/* purge references from the auxiliary tables before destruction */
		if (entry->half_open)
		{
			remove_half_open(this, entry);
		}
		if (entry->my_id && entry->other_id)
		{
			remove_connected_peers(this, entry);
		}
		if (entry->init_hash.ptr)
		{
			remove_init_hash(this, entry->init_hash);
		}
		remove_entry_at((private_enumerator_t*)enumerator);
		entry_destroy(entry);
	}
	enumerator->destroy(enumerator);
	charon->bus->set_sa(charon->bus, NULL);
	unlock_all_segments(this);

	/* the RNG is released here already; destroy() only frees the tables */
	this->rng->destroy(this->rng);
	this->rng = NULL;
}
2036
2037 METHOD(ike_sa_manager_t, destroy, void,
2038 private_ike_sa_manager_t *this)
2039 {
2040 u_int i;
2041
2042 /* these are already cleared in flush() above */
2043 free(this->ike_sa_table);
2044 free(this->half_open_table);
2045 free(this->connected_peers_table);
2046 free(this->init_hashes_table);
2047 for (i = 0; i < this->segment_count; i++)
2048 {
2049 this->segments[i].mutex->destroy(this->segments[i].mutex);
2050 this->half_open_segments[i].lock->destroy(this->half_open_segments[i].lock);
2051 this->connected_peers_segments[i].lock->destroy(this->connected_peers_segments[i].lock);
2052 this->init_hashes_segments[i].mutex->destroy(this->init_hashes_segments[i].mutex);
2053 }
2054 free(this->segments);
2055 free(this->half_open_segments);
2056 free(this->connected_peers_segments);
2057 free(this->init_hashes_segments);
2058
2059 free(this);
2060 }
2061
2062 /**
2063 * This function returns the next-highest power of two for the given number.
2064 * The algorithm works by setting all bits on the right-hand side of the most
2065 * significant 1 to 1 and then increments the whole number so it rolls over
2066 * to the nearest power of two. Note: returns 0 for n == 0
2067 */
2068 static u_int get_nearest_powerof2(u_int n)
2069 {
2070 u_int i;
2071
2072 --n;
2073 for (i = 1; i < sizeof(u_int) * 8; i <<= 1)
2074 {
2075 n |= n >> i;
2076 }
2077 return ++n;
2078 }
2079
/*
 * Described in header.
 */
ike_sa_manager_t *ike_sa_manager_create()
{
	private_ike_sa_manager_t *this;
	u_int i;

	INIT(this,
		.public = {
			.checkout = _checkout,
			.checkout_new = _checkout_new,
			.checkout_by_message = _checkout_by_message,
			.checkout_by_config = _checkout_by_config,
			.checkout_by_id = _checkout_by_id,
			.checkout_by_name = _checkout_by_name,
			.check_uniqueness = _check_uniqueness,
			.has_contact = _has_contact,
			.create_enumerator = _create_enumerator,
			.create_id_enumerator = _create_id_enumerator,
			.checkin = _checkin,
			.checkin_and_destroy = _checkin_and_destroy,
			.get_count = _get_count,
			.get_half_open_count = _get_half_open_count,
			.flush = _flush,
			.destroy = _destroy,
		},
	);

	/* without an RNG the manager cannot operate, fail construction */
	this->rng = lib->crypto->create_rng(lib->crypto, RNG_WEAK);
	if (this->rng == NULL)
	{
		DBG1(DBG_MGR, "manager initialization failed, no RNG supported");
		free(this);
		return NULL;
	}

	/* 0 means no limit on the number of IKE_SAs */
	this->ikesa_limit = lib->settings->get_int(lib->settings,
											   "%s.ikesa_limit", 0, lib->ns);

	/* round the configured size up to a power of two and clamp it to
	 * [1, MAX_HASHTABLE_SIZE] so the mask arithmetic below works */
	this->table_size = get_nearest_powerof2(lib->settings->get_int(
											lib->settings, "%s.ikesa_table_size",
											DEFAULT_HASHTABLE_SIZE, lib->ns));
	this->table_size = max(1, min(this->table_size, MAX_HASHTABLE_SIZE));
	this->table_mask = this->table_size - 1;

	/* segment count is also a power of two, at most one per table row */
	this->segment_count = get_nearest_powerof2(lib->settings->get_int(
											lib->settings, "%s.ikesa_table_segments",
											DEFAULT_SEGMENT_COUNT, lib->ns));
	this->segment_count = max(1, min(this->segment_count, this->table_size));
	this->segment_mask = this->segment_count - 1;

	this->ike_sa_table = calloc(this->table_size, sizeof(table_item_t*));
	this->segments = (segment_t*)calloc(this->segment_count, sizeof(segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->segments[i].mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
		this->segments[i].count = 0;
	}

	/* we use the same table parameters for the table to track half-open SAs */
	this->half_open_table = calloc(this->table_size, sizeof(table_item_t*));
	this->half_open_segments = calloc(this->segment_count, sizeof(shareable_segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->half_open_segments[i].lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
		this->half_open_segments[i].count = 0;
	}

	/* also for the hash table used for duplicate tests */
	this->connected_peers_table = calloc(this->table_size, sizeof(table_item_t*));
	this->connected_peers_segments = calloc(this->segment_count, sizeof(shareable_segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->connected_peers_segments[i].lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
		this->connected_peers_segments[i].count = 0;
	}

	/* and again for the table of hashes of seen initial IKE messages */
	this->init_hashes_table = calloc(this->table_size, sizeof(table_item_t*));
	this->init_hashes_segments = calloc(this->segment_count, sizeof(segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->init_hashes_segments[i].mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
		this->init_hashes_segments[i].count = 0;
	}

	/* whether checkout_by_config() may reuse an existing IKE_SA */
	this->reuse_ikesa = lib->settings->get_bool(lib->settings,
												"%s.reuse_ikesa", TRUE, lib->ns);
	return &this->public;
}