]> git.ipfire.org Git - thirdparty/strongswan.git/blob - src/libcharon/sa/ike_sa_manager.c
ike-sa-manager: Migrate child creating tasks during IKEv1 reauth
[thirdparty/strongswan.git] / src / libcharon / sa / ike_sa_manager.c
1 /*
2 * Copyright (C) 2005-2011 Martin Willi
3 * Copyright (C) 2011 revosec AG
4 *
5 * Copyright (C) 2008-2018 Tobias Brunner
6 * Copyright (C) 2005 Jan Hutter
7 * HSR Hochschule fuer Technik Rapperswil
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
16 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 * for more details.
18 */
19
20 #include <string.h>
21 #include <inttypes.h>
22
23 #include "ike_sa_manager.h"
24
25 #include <daemon.h>
26 #include <sa/ike_sa_id.h>
27 #include <bus/bus.h>
28 #include <threading/thread.h>
29 #include <threading/condvar.h>
30 #include <threading/mutex.h>
31 #include <threading/rwlock.h>
32 #include <collections/linked_list.h>
33 #include <crypto/hashers/hasher.h>
34 #include <processing/jobs/delete_ike_sa_job.h>
35
/* the default size of the hash table (MUST be a power of 2); NOTE(review):
 * presumably grown/overridden at runtime via configuration -- confirm
 * against the manager's constructor */
#define DEFAULT_HASHTABLE_SIZE 1

/* the maximum size of the hash table (MUST be a power of 2) */
#define MAX_HASHTABLE_SIZE (1 << 30)

/* the default number of segments (MUST be a power of 2); each segment is
 * locked independently to reduce contention */
#define DEFAULT_SEGMENT_COUNT 1
44
typedef struct entry_t entry_t;

/**
 * An entry in the linked list, contains IKE_SA, locking and lookup data.
 */
struct entry_t {

	/**
	 * Number of threads waiting for this ike_sa_t object.
	 */
	int waiting_threads;

	/**
	 * Condvar where threads can wait until the ike_sa_t object is free
	 * for use again.
	 */
	condvar_t *condvar;

	/**
	 * Thread by which this IKE_SA is currently checked out, if any.
	 */
	thread_t *checked_out;

	/**
	 * TRUE to reject threads that newly try to acquire this SA
	 * (checked in wait_for_entry()).
	 */
	bool driveout_new_threads;

	/**
	 * TRUE to make threads already waiting on the condvar give up on
	 * this SA (checked in wait_for_entry()).
	 */
	bool driveout_waiting_threads;

	/**
	 * Identification of an IKE_SA (SPIs).
	 */
	ike_sa_id_t *ike_sa_id;

	/**
	 * The contained ike_sa_t object.
	 */
	ike_sa_t *ike_sa;

	/**
	 * Hash of the IKE_SA_INIT message, used to detect retransmissions.
	 */
	chunk_t init_hash;

	/**
	 * Remote host address, required for DoS detection and duplicate
	 * checking (a host with same my_id and other_id is *not* considered
	 * a duplicate if the address family differs).
	 */
	host_t *other;

	/**
	 * As responder: Is this SA half-open?
	 */
	bool half_open;

	/**
	 * Own identity, required for duplicate checking.
	 */
	identification_t *my_id;

	/**
	 * Remote identity, required for duplicate checking.
	 */
	identification_t *other_id;

	/**
	 * Message ID or hash of currently processing message, -1 if none.
	 */
	uint32_t processing;
};
119
120 /**
121 * Implementation of entry_t.destroy.
122 */
123 static status_t entry_destroy(entry_t *this)
124 {
125 /* also destroy IKE SA */
126 this->ike_sa->destroy(this->ike_sa);
127 this->ike_sa_id->destroy(this->ike_sa_id);
128 chunk_free(&this->init_hash);
129 DESTROY_IF(this->other);
130 DESTROY_IF(this->my_id);
131 DESTROY_IF(this->other_id);
132 this->condvar->destroy(this->condvar);
133 free(this);
134 return SUCCESS;
135 }
136
137 /**
138 * Creates a new entry for the ike_sa_t list.
139 */
140 static entry_t *entry_create()
141 {
142 entry_t *this;
143
144 INIT(this,
145 .condvar = condvar_create(CONDVAR_TYPE_DEFAULT),
146 .processing = -1,
147 );
148
149 return this;
150 }
151
/**
 * Function that matches entry_t objects by ike_sa_id_t.
 *
 * Matches on full SPI equality, or on initiator SPI alone when one of the
 * responder SPIs is still zero (e.g. an SA we initiated but for which we
 * have not yet received a response).
 */
static bool entry_match_by_id(entry_t *entry, void *arg)
{
	ike_sa_id_t *id = arg;

	if (id->equals(id, entry->ike_sa_id))
	{
		return TRUE;
	}
	/* the initiator flag comparison is skipped for IKEv1, where the flag is
	 * not derivable from the IKE header (see ike_sa_id_hash() rationale) */
	if ((id->get_responder_spi(id) == 0 ||
		 entry->ike_sa_id->get_responder_spi(entry->ike_sa_id) == 0) &&
		(id->get_ike_version(id) == IKEV1_MAJOR_VERSION ||
		 id->is_initiator(id) == entry->ike_sa_id->is_initiator(entry->ike_sa_id)) &&
		id->get_initiator_spi(id) == entry->ike_sa_id->get_initiator_spi(entry->ike_sa_id))
	{
		/* this is TRUE for IKE_SAs that we initiated but have not yet received a response */
		return TRUE;
	}
	return FALSE;
}
174
175 /**
176 * Function that matches entry_t objects by ike_sa_t pointers.
177 */
178 static bool entry_match_by_sa(entry_t *entry, void *ike_sa)
179 {
180 return entry->ike_sa == ike_sa;
181 }
182
183 /**
184 * Hash function for ike_sa_id_t objects.
185 */
186 static u_int ike_sa_id_hash(ike_sa_id_t *ike_sa_id)
187 {
188 /* IKEv2 does not mandate random SPIs (RFC 5996, 2.6), they just have to be
189 * locally unique, so we use our randomly allocated SPI whether we are
190 * initiator or responder to ensure a good distribution. The latter is not
191 * possible for IKEv1 as we don't know whether we are original initiator or
192 * not (based on the IKE header). But as RFC 2408, section 2.5.3 proposes
193 * SPIs (Cookies) to be allocated near random (we allocate them randomly
194 * anyway) it seems safe to always use the initiator SPI. */
195 if (ike_sa_id->get_ike_version(ike_sa_id) == IKEV1_MAJOR_VERSION ||
196 ike_sa_id->is_initiator(ike_sa_id))
197 {
198 return ike_sa_id->get_initiator_spi(ike_sa_id);
199 }
200 return ike_sa_id->get_responder_spi(ike_sa_id);
201 }
202
typedef struct half_open_t half_open_t;

/**
 * Struct to manage half-open IKE_SAs per peer, keyed by remote address.
 */
struct half_open_t {
	/** chunk of remote host address */
	chunk_t other;

	/** the number of half-open IKE_SAs with that host */
	u_int count;

	/** the number of half-open IKE_SAs we responded to with that host */
	u_int count_responder;
};
218
/**
 * Destroys a half_open_t object, freeing the cloned address chunk.
 */
static void half_open_destroy(half_open_t *this)
{
	chunk_free(&this->other);
	free(this);
}
227
typedef struct connected_peers_t connected_peers_t;

/**
 * Struct tracking the established IKE_SAs between a pair of identities,
 * used for duplicate checking.
 */
struct connected_peers_t {
	/** own identity */
	identification_t *my_id;

	/** remote identity */
	identification_t *other_id;

	/** ip address family of peer */
	int family;

	/** list of ike_sa_id_t objects of IKE_SAs between the two identities */
	linked_list_t *sas;
};
243
244 static void connected_peers_destroy(connected_peers_t *this)
245 {
246 this->my_id->destroy(this->my_id);
247 this->other_id->destroy(this->other_id);
248 this->sas->destroy(this->sas);
249 free(this);
250 }
251
252 /**
253 * Function that matches connected_peers_t objects by the given ids.
254 */
255 static inline bool connected_peers_match(connected_peers_t *connected_peers,
256 identification_t *my_id, identification_t *other_id,
257 int family)
258 {
259 return my_id->equals(my_id, connected_peers->my_id) &&
260 other_id->equals(other_id, connected_peers->other_id) &&
261 (!family || family == connected_peers->family);
262 }
263
typedef struct init_hash_t init_hash_t;

/**
 * Cached hash of an initial IKE message, used to detect retransmissions.
 */
struct init_hash_t {
	/** hash of IKE_SA_INIT or initial phase1 message (data is not cloned) */
	chunk_t hash;

	/** our SPI allocated for the IKE_SA based on this message */
	uint64_t our_spi;
};
273
typedef struct segment_t segment_t;

/**
 * Struct to manage segments of the hash table (one mutex per segment).
 */
struct segment_t {
	/** mutex to access a segment exclusively */
	mutex_t *mutex;
};
283
typedef struct shareable_segment_t shareable_segment_t;

/**
 * Struct to manage segments of the "half-open" and "connected peers" hash tables.
 */
struct shareable_segment_t {
	/** rwlock to access a segment non-/exclusively */
	rwlock_t *lock;

	/** the number of entries in this segment - in case of the "half-open table"
	 * it's the sum of all half_open_t.count in a segment. */
	u_int count;
};
297
typedef struct table_item_t table_item_t;

/**
 * Instead of using linked_list_t for each bucket we store the data in our own
 * singly-linked overflow list to save memory.
 */
struct table_item_t {
	/** data of this item */
	void *value;

	/** next item in the overflow list */
	table_item_t *next;
};
311
typedef struct private_ike_sa_manager_t private_ike_sa_manager_t;

/**
 * Additional private members of ike_sa_manager_t.
 */
struct private_ike_sa_manager_t {
	/**
	 * Public interface of ike_sa_manager_t.
	 */
	ike_sa_manager_t public;

	/**
	 * Hash table with entries for the ike_sa_t objects.
	 */
	table_item_t **ike_sa_table;

	/**
	 * The size of the hash table.
	 */
	u_int table_size;

	/**
	 * Mask to map the hashes to table rows.
	 */
	u_int table_mask;

	/**
	 * Segments of the hash table (rows are interleaved over segments).
	 */
	segment_t *segments;

	/**
	 * The number of segments.
	 */
	u_int segment_count;

	/**
	 * Mask to map a table row to a segment.
	 */
	u_int segment_mask;

	/**
	 * Hash table with half_open_t objects, keyed by remote address.
	 */
	table_item_t **half_open_table;

	/**
	 * Segments of the "half-open" hash table.
	 */
	shareable_segment_t *half_open_segments;

	/**
	 * Total number of half-open IKE_SAs.
	 */
	refcount_t half_open_count;

	/**
	 * Total number of half-open IKE_SAs as responder.
	 */
	refcount_t half_open_count_responder;

	/**
	 * Total number of IKE_SAs registered with IKE_SA manager.
	 */
	refcount_t total_sa_count;

	/**
	 * Hash table with connected_peers_t objects, keyed by the identity pair.
	 */
	table_item_t **connected_peers_table;

	/**
	 * Segments of the "connected peers" hash table.
	 */
	shareable_segment_t *connected_peers_segments;

	/**
	 * Hash table with init_hash_t objects, for retransmission detection.
	 */
	table_item_t **init_hashes_table;

	/**
	 * Segments of the "hashes" hash table.
	 */
	segment_t *init_hashes_segments;

	/**
	 * RNG to get random SPIs for our side.
	 */
	rng_t *rng;

	/**
	 * Registered callback for IKE SPIs, takes precedence over the RNG.
	 */
	struct {
		spi_cb_t cb;
		void *data;
	} spi_cb;

	/**
	 * Lock to access the RNG instance and the callback.
	 */
	rwlock_t *spi_lock;

	/**
	 * Reuse existing IKE_SAs in checkout_by_config.
	 */
	bool reuse_ikesa;

	/**
	 * Configured IKE_SA limit, if any (0 means unlimited).
	 */
	u_int ikesa_limit;
};
426
427 /**
428 * Acquire a lock to access the segment of the table row with the given index.
429 * It also works with the segment index directly.
430 */
431 static inline void lock_single_segment(private_ike_sa_manager_t *this,
432 u_int index)
433 {
434 mutex_t *lock = this->segments[index & this->segment_mask].mutex;
435 lock->lock(lock);
436 }
437
438 /**
439 * Release the lock required to access the segment of the table row with the given index.
440 * It also works with the segment index directly.
441 */
442 static inline void unlock_single_segment(private_ike_sa_manager_t *this,
443 u_int index)
444 {
445 mutex_t *lock = this->segments[index & this->segment_mask].mutex;
446 lock->unlock(lock);
447 }
448
449 /**
450 * Lock all segments
451 */
452 static void lock_all_segments(private_ike_sa_manager_t *this)
453 {
454 u_int i;
455
456 for (i = 0; i < this->segment_count; i++)
457 {
458 this->segments[i].mutex->lock(this->segments[i].mutex);
459 }
460 }
461
462 /**
463 * Unlock all segments
464 */
465 static void unlock_all_segments(private_ike_sa_manager_t *this)
466 {
467 u_int i;
468
469 for (i = 0; i < this->segment_count; i++)
470 {
471 this->segments[i].mutex->unlock(this->segments[i].mutex);
472 }
473 }
474
typedef struct private_enumerator_t private_enumerator_t;

/**
 * Hash table enumerator implementation.
 *
 * While positioned on an item, the enumerator holds the lock of that item's
 * segment (see enumerate()/enumerator_destroy()).
 */
struct private_enumerator_t {

	/**
	 * implements enumerator interface
	 */
	enumerator_t enumerator;

	/**
	 * associated ike_sa_manager_t
	 */
	private_ike_sa_manager_t *manager;

	/**
	 * current segment index
	 */
	u_int segment;

	/**
	 * currently enumerating entry
	 */
	entry_t *entry;

	/**
	 * current table row index
	 */
	u_int row;

	/**
	 * current table item
	 */
	table_item_t *current;

	/**
	 * previous table item
	 */
	table_item_t *prev;
};
517
/**
 * Enumerate entries segment by segment; the segment lock is held while
 * positioned on an item and released once its rows are exhausted.
 */
METHOD(enumerator_t, enumerate, bool,
	private_enumerator_t *this, va_list args)
{
	entry_t **entry;
	u_int *segment;

	VA_ARGS_VGET(args, entry, segment);

	if (this->entry)
	{
		/* we are moving off the previously enumerated entry: wake threads
		 * that may be waiting on its condvar */
		this->entry->condvar->signal(this->entry->condvar);
		this->entry = NULL;
	}
	while (this->segment < this->manager->segment_count)
	{
		while (this->row < this->manager->table_size)
		{
			this->prev = this->current;
			if (this->current)
			{
				this->current = this->current->next;
			}
			else
			{
				/* entering a new row: acquire its segment's lock first */
				lock_single_segment(this->manager, this->segment);
				this->current = this->manager->ike_sa_table[this->row];
			}
			if (this->current)
			{
				/* segment stays locked while the caller uses the entry */
				*entry = this->entry = this->current->value;
				*segment = this->segment;
				return TRUE;
			}
			unlock_single_segment(this->manager, this->segment);
			/* rows are interleaved over segments: step to the next row
			 * belonging to the current segment */
			this->row += this->manager->segment_count;
		}
		this->segment++;
		this->row = this->segment;
	}
	return FALSE;
}
559
/**
 * Clean up the enumerator: wake waiters of the last enumerated entry and
 * release the segment lock still held if positioned on an item.
 */
METHOD(enumerator_t, enumerator_destroy, void,
	private_enumerator_t *this)
{
	if (this->entry)
	{
		this->entry->condvar->signal(this->entry->condvar);
	}
	if (this->current)
	{
		/* a segment lock is held while positioned on an item */
		unlock_single_segment(this->manager, this->segment);
	}
	free(this);
}
573
574 /**
575 * Creates an enumerator to enumerate the entries in the hash table.
576 */
577 static enumerator_t* create_table_enumerator(private_ike_sa_manager_t *this)
578 {
579 private_enumerator_t *enumerator;
580
581 INIT(enumerator,
582 .enumerator = {
583 .enumerate = enumerator_enumerate_default,
584 .venumerate = _enumerate,
585 .destroy = _enumerator_destroy,
586 },
587 .manager = this,
588 );
589 return &enumerator->enumerator;
590 }
591
592 /**
593 * Put an entry into the hash table.
594 * Note: The caller has to unlock the returned segment.
595 */
596 static u_int put_entry(private_ike_sa_manager_t *this, entry_t *entry)
597 {
598 table_item_t *current, *item;
599 u_int row, segment;
600
601 INIT(item,
602 .value = entry,
603 );
604
605 row = ike_sa_id_hash(entry->ike_sa_id) & this->table_mask;
606 segment = row & this->segment_mask;
607
608 lock_single_segment(this, segment);
609 current = this->ike_sa_table[row];
610 if (current)
611 { /* insert at the front of current bucket */
612 item->next = current;
613 }
614 this->ike_sa_table[row] = item;
615 ref_get(&this->total_sa_count);
616 return segment;
617 }
618
619 /**
620 * Remove an entry from the hash table.
621 * Note: The caller MUST have a lock on the segment of this entry.
622 */
623 static void remove_entry(private_ike_sa_manager_t *this, entry_t *entry)
624 {
625 table_item_t *item, *prev = NULL;
626 u_int row;
627
628 row = ike_sa_id_hash(entry->ike_sa_id) & this->table_mask;
629 item = this->ike_sa_table[row];
630 while (item)
631 {
632 if (item->value == entry)
633 {
634 if (prev)
635 {
636 prev->next = item->next;
637 }
638 else
639 {
640 this->ike_sa_table[row] = item->next;
641 }
642 ignore_result(ref_put(&this->total_sa_count));
643 free(item);
644 break;
645 }
646 prev = item;
647 item = item->next;
648 }
649 }
650
/**
 * Remove the entry at the current enumerator position.
 *
 * If the item was in the middle of a bucket, the enumerator is rewound to
 * the previous item so enumeration can continue and the segment lock is
 * kept. If it was the bucket's head, the segment lock acquired by the
 * enumerator is released here (the next enumerate() call re-locks it and
 * re-reads the row head).
 */
static void remove_entry_at(private_enumerator_t *this)
{
	this->entry = NULL;
	if (this->current)
	{
		table_item_t *current = this->current;

		ignore_result(ref_put(&this->manager->total_sa_count));
		this->current = this->prev;

		if (this->prev)
		{
			this->prev->next = current->next;
		}
		else
		{
			/* removed the row head: unhook it and give up the segment lock */
			this->manager->ike_sa_table[this->row] = current->next;
			unlock_single_segment(this->manager, this->segment);
		}
		free(current);
	}
}
676
677 /**
678 * Find an entry using the provided match function to compare the entries for
679 * equality.
680 */
681 static status_t get_entry_by_match_function(private_ike_sa_manager_t *this,
682 ike_sa_id_t *ike_sa_id, entry_t **entry, u_int *segment,
683 bool (*match)(entry_t*,void*), void *param)
684 {
685 table_item_t *item;
686 u_int row, seg;
687
688 row = ike_sa_id_hash(ike_sa_id) & this->table_mask;
689 seg = row & this->segment_mask;
690
691 lock_single_segment(this, seg);
692 item = this->ike_sa_table[row];
693 while (item)
694 {
695 if (match(item->value, param))
696 {
697 *entry = item->value;
698 *segment = seg;
699 /* the locked segment has to be unlocked by the caller */
700 return SUCCESS;
701 }
702 item = item->next;
703 }
704 unlock_single_segment(this, seg);
705 return NOT_FOUND;
706 }
707
708 /**
709 * Find an entry by ike_sa_id_t.
710 * Note: On SUCCESS, the caller has to unlock the segment.
711 */
712 static status_t get_entry_by_id(private_ike_sa_manager_t *this,
713 ike_sa_id_t *ike_sa_id, entry_t **entry, u_int *segment)
714 {
715 return get_entry_by_match_function(this, ike_sa_id, entry, segment,
716 entry_match_by_id, ike_sa_id);
717 }
718
719 /**
720 * Find an entry by IKE_SA pointer.
721 * Note: On SUCCESS, the caller has to unlock the segment.
722 */
723 static status_t get_entry_by_sa(private_ike_sa_manager_t *this,
724 ike_sa_id_t *ike_sa_id, ike_sa_t *ike_sa, entry_t **entry, u_int *segment)
725 {
726 return get_entry_by_match_function(this, ike_sa_id, entry, segment,
727 entry_match_by_sa, ike_sa);
728 }
729
/**
 * Wait until no other thread is using an IKE_SA, return FALSE if entry not
 * acquirable.
 *
 * Must be called with the entry's segment mutex held; the condvar wait
 * releases and reacquires that mutex. Returns FALSE if the entry rejects
 * new or waiting threads (it is being removed).
 */
static bool wait_for_entry(private_ike_sa_manager_t *this, entry_t *entry,
						   u_int segment)
{
	if (entry->driveout_new_threads)
	{
		/* we are not allowed to get this */
		return FALSE;
	}
	while (entry->checked_out && !entry->driveout_waiting_threads)
	{
		/* so wait until we can get it for us.
		 * we register us as waiting. */
		entry->waiting_threads++;
		entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
		entry->waiting_threads--;
	}
	/* hm, a deletion request forbids us to get this SA, get next one */
	if (entry->driveout_waiting_threads)
	{
		/* we must signal here, others may be waiting on it, too */
		entry->condvar->signal(entry->condvar);
		return FALSE;
	}
	return TRUE;
}
759
/**
 * Put a half-open SA into the hash table.
 *
 * The half-open table is keyed by the remote address; per-peer counters,
 * the global counters and the segment counter are all updated under the
 * segment's write lock.
 */
static void put_half_open(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item;
	u_int row, segment;
	rwlock_t *lock;
	ike_sa_id_t *ike_id;
	half_open_t *half_open;
	chunk_t addr;

	ike_id = entry->ike_sa_id;
	addr = entry->other->get_address(entry->other);
	row = chunk_hash(addr) & this->table_mask;
	segment = row & this->segment_mask;
	lock = this->half_open_segments[segment].lock;
	lock->write_lock(lock);
	/* look for an existing counter for this peer address */
	item = this->half_open_table[row];
	while (item)
	{
		half_open = item->value;

		if (chunk_equals(addr, half_open->other))
		{
			break;
		}
		item = item->next;
	}

	if (!item)
	{
		/* first half-open SA for this peer, create a new counter */
		INIT(half_open,
			.other = chunk_clone(addr),
		);
		INIT(item,
			.value = half_open,
			.next = this->half_open_table[row],
		);
		this->half_open_table[row] = item;
	}
	half_open->count++;
	ref_get(&this->half_open_count);
	if (!ike_id->is_initiator(ike_id))
	{
		/* SAs we responded to are tracked separately */
		half_open->count_responder++;
		ref_get(&this->half_open_count_responder);
	}
	this->half_open_segments[segment].count++;
	lock->unlock(lock);
}
811
/**
 * Remove a half-open SA from the hash table.
 *
 * Decrements the per-peer, global and segment counters under the segment's
 * write lock and destroys the per-peer counter once it reaches zero.
 */
static void remove_half_open(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item, *prev = NULL;
	u_int row, segment;
	rwlock_t *lock;
	ike_sa_id_t *ike_id;
	chunk_t addr;

	ike_id = entry->ike_sa_id;
	addr = entry->other->get_address(entry->other);
	row = chunk_hash(addr) & this->table_mask;
	segment = row & this->segment_mask;
	lock = this->half_open_segments[segment].lock;
	lock->write_lock(lock);
	item = this->half_open_table[row];
	while (item)
	{
		half_open_t *half_open = item->value;

		if (chunk_equals(addr, half_open->other))
		{
			if (!ike_id->is_initiator(ike_id))
			{
				/* matches the responder accounting in put_half_open() */
				half_open->count_responder--;
				ignore_result(ref_put(&this->half_open_count_responder));
			}
			ignore_result(ref_put(&this->half_open_count));
			if (--half_open->count == 0)
			{
				/* last half-open SA for this peer, drop the counter */
				if (prev)
				{
					prev->next = item->next;
				}
				else
				{
					this->half_open_table[row] = item->next;
				}
				half_open_destroy(half_open);
				free(item);
			}
			this->half_open_segments[segment].count--;
			break;
		}
		prev = item;
		item = item->next;
	}
	lock->unlock(lock);
}
863
864 CALLBACK(id_matches, bool,
865 ike_sa_id_t *a, va_list args)
866 {
867 ike_sa_id_t *b;
868
869 VA_ARGS_VGET(args, b);
870 return a->equals(a, b);
871 }
872
/**
 * Put an SA between two peers into the hash table.
 *
 * The table is keyed by the identity pair (plus address family); the SA's
 * ID is cloned into the matching bucket's list unless already present.
 */
static void put_connected_peers(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item;
	u_int row, segment;
	rwlock_t *lock;
	connected_peers_t *connected_peers;
	chunk_t my_id, other_id;
	int family;

	my_id = entry->my_id->get_encoding(entry->my_id);
	other_id = entry->other_id->get_encoding(entry->other_id);
	family = entry->other->get_family(entry->other);
	row = chunk_hash_inc(other_id, chunk_hash(my_id)) & this->table_mask;
	segment = row & this->segment_mask;
	lock = this->connected_peers_segments[segment].lock;
	lock->write_lock(lock);
	item = this->connected_peers_table[row];
	while (item)
	{
		connected_peers = item->value;

		if (connected_peers_match(connected_peers, entry->my_id,
								  entry->other_id, family))
		{
			if (connected_peers->sas->find_first(connected_peers->sas,
												 id_matches, NULL, entry->ike_sa_id))
			{
				/* this SA is already registered between the two peers */
				lock->unlock(lock);
				return;
			}
			break;
		}
		item = item->next;
	}

	if (!item)
	{
		/* first SA between this identity pair, create the bucket entry */
		INIT(connected_peers,
			.my_id = entry->my_id->clone(entry->my_id),
			.other_id = entry->other_id->clone(entry->other_id),
			.family = family,
			.sas = linked_list_create(),
		);
		INIT(item,
			.value = connected_peers,
			.next = this->connected_peers_table[row],
		);
		this->connected_peers_table[row] = item;
	}
	connected_peers->sas->insert_last(connected_peers->sas,
									  entry->ike_sa_id->clone(entry->ike_sa_id));
	this->connected_peers_segments[segment].count++;
	lock->unlock(lock);
}
930
/**
 * Remove an SA between two peers from the hash table.
 *
 * Removes the SA's ID from the matching identity pair's list and destroys
 * the pair's record once its list is empty.
 */
static void remove_connected_peers(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item, *prev = NULL;
	u_int row, segment;
	rwlock_t *lock;
	chunk_t my_id, other_id;
	int family;

	my_id = entry->my_id->get_encoding(entry->my_id);
	other_id = entry->other_id->get_encoding(entry->other_id);
	family = entry->other->get_family(entry->other);

	row = chunk_hash_inc(other_id, chunk_hash(my_id)) & this->table_mask;
	segment = row & this->segment_mask;

	lock = this->connected_peers_segments[segment].lock;
	lock->write_lock(lock);
	item = this->connected_peers_table[row];
	while (item)
	{
		connected_peers_t *current = item->value;

		if (connected_peers_match(current, entry->my_id, entry->other_id,
								  family))
		{
			enumerator_t *enumerator;
			ike_sa_id_t *ike_sa_id;

			/* the list holds cloned IDs, destroy the one we remove */
			enumerator = current->sas->create_enumerator(current->sas);
			while (enumerator->enumerate(enumerator, &ike_sa_id))
			{
				if (ike_sa_id->equals(ike_sa_id, entry->ike_sa_id))
				{
					current->sas->remove_at(current->sas, enumerator);
					ike_sa_id->destroy(ike_sa_id);
					this->connected_peers_segments[segment].count--;
					break;
				}
			}
			enumerator->destroy(enumerator);
			if (current->sas->get_count(current->sas) == 0)
			{
				/* no SA left between this pair, drop the record */
				if (prev)
				{
					prev->next = item->next;
				}
				else
				{
					this->connected_peers_table[row] = item->next;
				}
				connected_peers_destroy(current);
				free(item);
			}
			break;
		}
		prev = item;
		item = item->next;
	}
	lock->unlock(lock);
}
994
995 /**
996 * Get a random SPI for new IKE_SAs
997 */
998 static uint64_t get_spi(private_ike_sa_manager_t *this)
999 {
1000 uint64_t spi;
1001
1002 this->spi_lock->read_lock(this->spi_lock);
1003 if (this->spi_cb.cb)
1004 {
1005 spi = this->spi_cb.cb(this->spi_cb.data);
1006 }
1007 else if (!this->rng ||
1008 !this->rng->get_bytes(this->rng, sizeof(spi), (uint8_t*)&spi))
1009 {
1010 spi = 0;
1011 }
1012 this->spi_lock->unlock(this->spi_lock);
1013 return spi;
1014 }
1015
/**
 * Calculate the hash of the initial IKE message. Memory for the hash is
 * allocated on success.
 *
 * For IKEv1 fragments only source IP, port and SPI are hashed; for Main
 * Mode the source address is mixed in before the packet data.
 *
 * @returns TRUE on success
 */
static bool get_init_hash(hasher_t *hasher, message_t *message, chunk_t *hash)
{
	host_t *src;

	if (message->get_first_payload_type(message) == PLV1_FRAGMENT)
	{	/* only hash the source IP, port and SPI for fragmented init messages */
		uint16_t port;
		uint64_t spi;

		src = message->get_source(message);
		if (!hasher->allocate_hash(hasher, src->get_address(src), NULL))
		{
			return FALSE;
		}
		port = src->get_port(src);
		if (!hasher->allocate_hash(hasher, chunk_from_thing(port), NULL))
		{
			return FALSE;
		}
		spi = message->get_initiator_spi(message);
		return hasher->allocate_hash(hasher, chunk_from_thing(spi), hash);
	}
	if (message->get_exchange_type(message) == ID_PROT)
	{	/* include the source for Main Mode as the hash will be the same if
		 * SPIs are reused by two initiators that use the same proposal */
		src = message->get_source(message);

		if (!hasher->allocate_hash(hasher, src->get_address(src), NULL))
		{
			return FALSE;
		}
	}
	return hasher->allocate_hash(hasher, message->get_packet_data(message), hash);
}
1056
1057 /**
1058 * Check if we already have created an IKE_SA based on the initial IKE message
1059 * with the given hash.
1060 * If not the hash is stored, the hash data is not(!) cloned.
1061 *
1062 * Also, the local SPI is returned. In case of a retransmit this is already
1063 * stored together with the hash, otherwise it is newly allocated and should
1064 * be used to create the IKE_SA.
1065 *
1066 * @returns ALREADY_DONE if the message with the given hash has been seen before
1067 * NOT_FOUND if the message hash was not found
1068 * FAILED if the SPI allocation failed
1069 */
1070 static status_t check_and_put_init_hash(private_ike_sa_manager_t *this,
1071 chunk_t init_hash, uint64_t *our_spi)
1072 {
1073 table_item_t *item;
1074 u_int row, segment;
1075 mutex_t *mutex;
1076 init_hash_t *init;
1077 uint64_t spi;
1078
1079 row = chunk_hash(init_hash) & this->table_mask;
1080 segment = row & this->segment_mask;
1081 mutex = this->init_hashes_segments[segment].mutex;
1082 mutex->lock(mutex);
1083 item = this->init_hashes_table[row];
1084 while (item)
1085 {
1086 init_hash_t *current = item->value;
1087
1088 if (chunk_equals(init_hash, current->hash))
1089 {
1090 *our_spi = current->our_spi;
1091 mutex->unlock(mutex);
1092 return ALREADY_DONE;
1093 }
1094 item = item->next;
1095 }
1096
1097 spi = get_spi(this);
1098 if (!spi)
1099 {
1100 return FAILED;
1101 }
1102
1103 INIT(init,
1104 .hash = {
1105 .len = init_hash.len,
1106 .ptr = init_hash.ptr,
1107 },
1108 .our_spi = spi,
1109 );
1110 INIT(item,
1111 .value = init,
1112 .next = this->init_hashes_table[row],
1113 );
1114 this->init_hashes_table[row] = item;
1115 *our_spi = init->our_spi;
1116 mutex->unlock(mutex);
1117 return NOT_FOUND;
1118 }
1119
1120 /**
1121 * Remove the hash of an initial IKE message from the cache.
1122 */
1123 static void remove_init_hash(private_ike_sa_manager_t *this, chunk_t init_hash)
1124 {
1125 table_item_t *item, *prev = NULL;
1126 u_int row, segment;
1127 mutex_t *mutex;
1128
1129 row = chunk_hash(init_hash) & this->table_mask;
1130 segment = row & this->segment_mask;
1131 mutex = this->init_hashes_segments[segment].mutex;
1132 mutex->lock(mutex);
1133 item = this->init_hashes_table[row];
1134 while (item)
1135 {
1136 init_hash_t *current = item->value;
1137
1138 if (chunk_equals(init_hash, current->hash))
1139 {
1140 if (prev)
1141 {
1142 prev->next = item->next;
1143 }
1144 else
1145 {
1146 this->init_hashes_table[row] = item->next;
1147 }
1148 free(current);
1149 free(item);
1150 break;
1151 }
1152 prev = item;
1153 item = item->next;
1154 }
1155 mutex->unlock(mutex);
1156 }
1157
/**
 * Check out the IKE_SA with the given SPIs for exclusive use by this thread.
 * The result (or NULL on failure) is also published as the current SA on
 * the bus.
 */
METHOD(ike_sa_manager_t, checkout, ike_sa_t*,
	private_ike_sa_manager_t *this, ike_sa_id_t *ike_sa_id)
{
	ike_sa_t *ike_sa = NULL;
	entry_t *entry;
	u_int segment;

	DBG2(DBG_MGR, "checkout %N SA with SPIs %.16"PRIx64"_i %.16"PRIx64"_r",
		 ike_version_names, ike_sa_id->get_ike_version(ike_sa_id),
		 be64toh(ike_sa_id->get_initiator_spi(ike_sa_id)),
		 be64toh(ike_sa_id->get_responder_spi(ike_sa_id)));

	if (get_entry_by_id(this, ike_sa_id, &entry, &segment) == SUCCESS)
	{
		/* wait until no other thread uses the entry, then mark it ours */
		if (wait_for_entry(this, entry, segment))
		{
			entry->checked_out = thread_current();
			ike_sa = entry->ike_sa;
			DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
				 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
		}
		unlock_single_segment(this, segment);
	}
	charon->bus->set_sa(charon->bus, ike_sa);

	if (!ike_sa)
	{
		DBG2(DBG_MGR, "IKE_SA checkout not successful");
	}
	return ike_sa;
}
1189
1190 METHOD(ike_sa_manager_t, checkout_new, ike_sa_t*,
1191 private_ike_sa_manager_t* this, ike_version_t version, bool initiator)
1192 {
1193 ike_sa_id_t *ike_sa_id;
1194 ike_sa_t *ike_sa;
1195 uint8_t ike_version;
1196 uint64_t spi;
1197
1198 ike_version = version == IKEV1 ? IKEV1_MAJOR_VERSION : IKEV2_MAJOR_VERSION;
1199
1200 spi = get_spi(this);
1201 if (!spi)
1202 {
1203 DBG1(DBG_MGR, "failed to allocate SPI for new IKE_SA");
1204 return NULL;
1205 }
1206
1207 if (initiator)
1208 {
1209 ike_sa_id = ike_sa_id_create(ike_version, spi, 0, TRUE);
1210 }
1211 else
1212 {
1213 ike_sa_id = ike_sa_id_create(ike_version, 0, spi, FALSE);
1214 }
1215 ike_sa = ike_sa_create(ike_sa_id, initiator, version);
1216 ike_sa_id->destroy(ike_sa_id);
1217
1218 if (ike_sa)
1219 {
1220 DBG2(DBG_MGR, "created IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
1221 ike_sa->get_unique_id(ike_sa));
1222 }
1223 return ike_sa;
1224 }
1225
1226 /**
1227 * Get the message ID or message hash to detect early retransmissions
1228 */
1229 static uint32_t get_message_id_or_hash(message_t *message)
1230 {
1231 if (message->get_major_version(message) == IKEV1_MAJOR_VERSION)
1232 {
1233 /* Use a hash for IKEv1 Phase 1, where we don't have a MID, and Quick
1234 * Mode, where all three messages use the same message ID */
1235 if (message->get_message_id(message) == 0 ||
1236 message->get_exchange_type(message) == QUICK_MODE)
1237 {
1238 return chunk_hash(message->get_packet_data(message));
1239 }
1240 }
1241 return message->get_message_id(message);
1242 }
1243
/*
 * Check out the IKE_SA an inbound message belongs to, creating a new IKE_SA
 * for initial IKE_SA_INIT/ID_PROT/AGGRESSIVE requests.  Returns NULL (and
 * logs why) if the message is a retransmit already being processed, refers
 * to an unknown SPI, or a new SA can't be created.
 */
METHOD(ike_sa_manager_t, checkout_by_message, ike_sa_t*,
	private_ike_sa_manager_t* this, message_t *message)
{
	u_int segment;
	entry_t *entry;
	ike_sa_t *ike_sa = NULL;
	ike_sa_id_t *id;
	ike_version_t ike_version;
	bool is_init = FALSE;

	id = message->get_ike_sa_id(message);
	/* clone the IKE_SA ID so we can modify the initiator flag */
	id = id->clone(id);
	/* the message carries the sender's view; flip it to our view */
	id->switch_initiator(id);

	DBG2(DBG_MGR, "checkout %N SA by message with SPIs %.16"PRIx64"_i "
		 "%.16"PRIx64"_r", ike_version_names, id->get_ike_version(id),
		 be64toh(id->get_initiator_spi(id)),
		 be64toh(id->get_responder_spi(id)));

	/* a zero responder SPI with MID 0 may be the very first message of an
	 * exchange, for which we might have to create a new IKE_SA */
	if (id->get_responder_spi(id) == 0 &&
		message->get_message_id(message) == 0)
	{
		if (message->get_major_version(message) == IKEV2_MAJOR_VERSION)
		{
			if (message->get_exchange_type(message) == IKE_SA_INIT &&
				message->get_request(message))
			{
				ike_version = IKEV2;
				is_init = TRUE;
			}
		}
		else
		{
			if (message->get_exchange_type(message) == ID_PROT ||
				message->get_exchange_type(message) == AGGRESSIVE)
			{
				ike_version = IKEV1;
				is_init = TRUE;
				if (id->is_initiator(id))
				{	/* not set in IKEv1, switch back before applying to new SA */
					id->switch_initiator(id);
				}
			}
		}
	}

	if (is_init)
	{
		hasher_t *hasher;
		uint64_t our_spi;
		chunk_t hash;

		/* hash the init message to detect retransmits of the same request */
		hasher = lib->crypto->create_hasher(lib->crypto, HASH_SHA1);
		if (!hasher || !get_init_hash(hasher, message, &hash))
		{
			DBG1(DBG_MGR, "ignoring message, failed to hash message");
			DESTROY_IF(hasher);
			id->destroy(id);
			goto out;
		}
		hasher->destroy(hasher);

		/* ensure this is not a retransmit of an already handled init message */
		switch (check_and_put_init_hash(this, hash, &our_spi))
		{
			case NOT_FOUND:
			{	/* we've not seen this packet yet, create a new IKE_SA */
				if (!this->ikesa_limit ||
					this->public.get_count(&this->public) < this->ikesa_limit)
				{
					id->set_responder_spi(id, our_spi);
					ike_sa = ike_sa_create(id, FALSE, ike_version);
					if (ike_sa)
					{
						/* register the new SA and hand it out checked out;
						 * entry takes ownership of id and hash */
						entry = entry_create();
						entry->ike_sa = ike_sa;
						entry->ike_sa_id = id;
						entry->processing = get_message_id_or_hash(message);
						entry->init_hash = hash;

						segment = put_entry(this, entry);
						entry->checked_out = thread_current();
						unlock_single_segment(this, segment);

						DBG2(DBG_MGR, "created IKE_SA %s[%u]",
							 ike_sa->get_name(ike_sa),
							 ike_sa->get_unique_id(ike_sa));
						goto out;
					}
					else
					{
						DBG1(DBG_MGR, "creating IKE_SA failed, ignoring message");
					}
				}
				else
				{
					DBG1(DBG_MGR, "ignoring %N, hitting IKE_SA limit (%u)",
						 exchange_type_names, message->get_exchange_type(message),
						 this->ikesa_limit);
				}
				/* creation failed or limit hit: undo the init-hash entry */
				remove_init_hash(this, hash);
				chunk_free(&hash);
				id->destroy(id);
				goto out;
			}
			case FAILED:
			{	/* we failed to allocate an SPI */
				chunk_free(&hash);
				id->destroy(id);
				DBG1(DBG_MGR, "ignoring message, failed to allocate SPI");
				goto out;
			}
			case ALREADY_DONE:
			default:
				break;
		}
		/* it looks like we already handled this init message to some degree */
		id->set_responder_spi(id, our_spi);
		chunk_free(&hash);
	}

	/* not an initial message (or one we've seen): look up the existing SA */
	if (get_entry_by_id(this, id, &entry, &segment) == SUCCESS)
	{
		/* only check out if we are not already processing it. */
		if (entry->processing == get_message_id_or_hash(message))
		{
			DBG1(DBG_MGR, "ignoring request with ID %u, already processing",
				 entry->processing);
		}
		else if (wait_for_entry(this, entry, segment))
		{
			ike_sa_id_t *ike_id;

			ike_id = entry->ike_sa->get_id(entry->ike_sa);
			entry->checked_out = thread_current();
			if (message->get_first_payload_type(message) != PLV1_FRAGMENT &&
				message->get_first_payload_type(message) != PLV2_FRAGMENT)
			{	/* TODO-FRAG: this fails if there are unencrypted payloads */
				entry->processing = get_message_id_or_hash(message);
			}
			/* adopt the peer's responder SPI if we don't have one yet */
			if (ike_id->get_responder_spi(ike_id) == 0)
			{
				ike_id->set_responder_spi(ike_id, id->get_responder_spi(id));
			}
			ike_sa = entry->ike_sa;
			DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
				 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
		}
		unlock_single_segment(this, segment);
	}
	else
	{
		charon->bus->alert(charon->bus, ALERT_INVALID_IKE_SPI, message);
	}
	id->destroy(id);

out:
	/* publish the checked-out SA (or NULL) as this thread's current SA */
	charon->bus->set_sa(charon->bus, ike_sa);
	if (!ike_sa)
	{
		DBG2(DBG_MGR, "IKE_SA checkout not successful");
	}
	return ike_sa;
}
1409
/*
 * Check out an existing IKE_SA matching the given peer config for reuse,
 * or create a new one.  With reuse disabled (IKEv2 only), a fresh SA is
 * always handed out.
 */
METHOD(ike_sa_manager_t, checkout_by_config, ike_sa_t*,
	private_ike_sa_manager_t *this, peer_cfg_t *peer_cfg)
{
	enumerator_t *enumerator;
	entry_t *entry;
	ike_sa_t *ike_sa = NULL;
	peer_cfg_t *current_peer;
	ike_cfg_t *current_ike;
	u_int segment;

	DBG2(DBG_MGR, "checkout IKE_SA by config");

	if (!this->reuse_ikesa && peer_cfg->get_ike_version(peer_cfg) != IKEV1)
	{	/* IKE_SA reuse disabled by config (not possible for IKEv1) */
		ike_sa = checkout_new(this, peer_cfg->get_ike_version(peer_cfg), TRUE);
		charon->bus->set_sa(charon->bus, ike_sa);
		goto out;
	}

	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		/* wait_for_entry blocks until the entry is free, or fails if it is
		 * being driven out */
		if (!wait_for_entry(this, entry, segment))
		{
			continue;
		}
		if (entry->ike_sa->get_state(entry->ike_sa) == IKE_DELETING ||
			entry->ike_sa->get_state(entry->ike_sa) == IKE_REKEYED)
		{	/* skip IKE_SAs which are not usable, wake other waiting threads */
			entry->condvar->signal(entry->condvar);
			continue;
		}

		/* both the peer config and its IKE config have to match for reuse */
		current_peer = entry->ike_sa->get_peer_cfg(entry->ike_sa);
		if (current_peer && current_peer->equals(current_peer, peer_cfg))
		{
			current_ike = current_peer->get_ike_cfg(current_peer);
			if (current_ike->equals(current_ike, peer_cfg->get_ike_cfg(peer_cfg)))
			{
				entry->checked_out = thread_current();
				ike_sa = entry->ike_sa;
				DBG2(DBG_MGR, "found existing IKE_SA %u with a '%s' config",
					 ike_sa->get_unique_id(ike_sa),
					 current_peer->get_name(current_peer));
				break;
			}
		}
		/* other threads might be waiting for this entry */
		entry->condvar->signal(entry->condvar);
	}
	enumerator->destroy(enumerator);

	if (!ike_sa)
	{	/* no IKE_SA using such a config, hand out a new */
		ike_sa = checkout_new(this, peer_cfg->get_ike_version(peer_cfg), TRUE);
	}
	charon->bus->set_sa(charon->bus, ike_sa);

out:
	if (!ike_sa)
	{
		DBG2(DBG_MGR, "IKE_SA checkout not successful");
	}
	return ike_sa;
}
1475
1476 METHOD(ike_sa_manager_t, checkout_by_id, ike_sa_t*,
1477 private_ike_sa_manager_t *this, uint32_t id)
1478 {
1479 enumerator_t *enumerator;
1480 entry_t *entry;
1481 ike_sa_t *ike_sa = NULL;
1482 u_int segment;
1483
1484 DBG2(DBG_MGR, "checkout IKE_SA by unique ID %u", id);
1485
1486 enumerator = create_table_enumerator(this);
1487 while (enumerator->enumerate(enumerator, &entry, &segment))
1488 {
1489 if (wait_for_entry(this, entry, segment))
1490 {
1491 if (entry->ike_sa->get_unique_id(entry->ike_sa) == id)
1492 {
1493 ike_sa = entry->ike_sa;
1494 entry->checked_out = thread_current();
1495 break;
1496 }
1497 /* other threads might be waiting for this entry */
1498 entry->condvar->signal(entry->condvar);
1499 }
1500 }
1501 enumerator->destroy(enumerator);
1502
1503 if (ike_sa)
1504 {
1505 DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
1506 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
1507 }
1508 else
1509 {
1510 DBG2(DBG_MGR, "IKE_SA checkout not successful");
1511 }
1512 charon->bus->set_sa(charon->bus, ike_sa);
1513 return ike_sa;
1514 }
1515
/*
 * Check out the first IKE_SA whose connection name matches, or, if child is
 * TRUE, the first IKE_SA owning a CHILD_SA with the given policy name.
 */
METHOD(ike_sa_manager_t, checkout_by_name, ike_sa_t*,
	private_ike_sa_manager_t *this, char *name, bool child)
{
	enumerator_t *enumerator, *children;
	entry_t *entry;
	ike_sa_t *ike_sa = NULL;
	child_sa_t *child_sa;
	u_int segment;

	DBG2(DBG_MGR, "checkout IKE_SA by%s name '%s'", child ? " child" : "", name);

	/* linear scan, names are not indexed */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		if (wait_for_entry(this, entry, segment))
		{
			/* look for a child with such a policy name ... */
			if (child)
			{
				children = entry->ike_sa->create_child_sa_enumerator(entry->ike_sa);
				while (children->enumerate(children, (void**)&child_sa))
				{
					if (streq(child_sa->get_name(child_sa), name))
					{
						ike_sa = entry->ike_sa;
						break;
					}
				}
				children->destroy(children);
			}
			else /* ... or for a IKE_SA with such a connection name */
			{
				if (streq(entry->ike_sa->get_name(entry->ike_sa), name))
				{
					ike_sa = entry->ike_sa;
				}
			}
			/* got one, return */
			if (ike_sa)
			{
				entry->checked_out = thread_current();
				DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
					 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
				break;
			}
			/* other threads might be waiting for this entry */
			entry->condvar->signal(entry->condvar);
		}
	}
	enumerator->destroy(enumerator);

	charon->bus->set_sa(charon->bus, ike_sa);

	if (!ike_sa)
	{
		DBG2(DBG_MGR, "IKE_SA checkout not successful");
	}
	return ike_sa;
}
1575
/*
 * Replace the initiator SPI of a connecting IKE_SA with a freshly allocated
 * one (e.g. after a COOKIE/INVALID_KE restart).  Only allowed for the SA the
 * calling thread currently has checked out, as initiator, in IKE_CONNECTING.
 */
METHOD(ike_sa_manager_t, new_initiator_spi, bool,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
{
	ike_sa_state_t state;
	ike_sa_id_t *ike_sa_id;
	entry_t *entry;
	u_int segment;
	uint64_t new_spi, spi;

	state = ike_sa->get_state(ike_sa);
	if (state != IKE_CONNECTING)
	{
		DBG1(DBG_MGR, "unable to change initiator SPI for IKE_SA in state "
			 "%N", ike_sa_state_names, state);
		return FALSE;
	}

	ike_sa_id = ike_sa->get_id(ike_sa);
	if (!ike_sa_id->is_initiator(ike_sa_id))
	{
		DBG1(DBG_MGR, "unable to change initiator SPI of IKE_SA as responder");
		return FALSE;
	}

	/* the bus tracks the SA checked out by each thread; refuse to modify an
	 * SA another thread owns */
	if (ike_sa != charon->bus->get_sa(charon->bus))
	{
		DBG1(DBG_MGR, "unable to change initiator SPI of IKE_SA not checked "
			 "out by current thread");
		return FALSE;
	}

	new_spi = get_spi(this);
	if (!new_spi)
	{
		DBG1(DBG_MGR, "unable to allocate new initiator SPI for IKE_SA");
		return FALSE;
	}

	/* on SUCCESS the segment is left locked until we are done below */
	if (get_entry_by_sa(this, ike_sa_id, ike_sa, &entry, &segment) == SUCCESS)
	{
		if (entry->driveout_waiting_threads && entry->driveout_new_threads)
		{	/* it looks like flush() has been called and the SA is being deleted
			 * anyway, no need for a new SPI */
			DBG2(DBG_MGR, "ignored change of initiator SPI during shutdown");
			unlock_single_segment(this, segment);
			return FALSE;
		}
	}
	else
	{
		DBG1(DBG_MGR, "unable to change initiator SPI of IKE_SA, not found");
		return FALSE;
	}

	/* the hashtable row and segment are determined by the local SPI as
	 * initiator, so if we change it the row and segment derived from it might
	 * change as well. This could be a problem for threads waiting for the
	 * entry (in particular those enumerating entries to check them out by
	 * unique ID or name). In order to avoid having to drive them out and thus
	 * preventing them from checking out the entry (even though the ID or name
	 * will not change and enumerating it is also fine), we mask the new SPI and
	 * merge it with the old SPI so the entry ends up in the same row/segment.
	 * Since SPIs are 64-bit and the number of rows/segments is usually
	 * relatively low this should not be a problem. */
	spi = ike_sa_id->get_initiator_spi(ike_sa_id);
	new_spi = (spi & (uint64_t)this->table_mask) |
			  (new_spi & ~(uint64_t)this->table_mask);

	DBG2(DBG_MGR, "change initiator SPI of IKE_SA %s[%u] from %.16"PRIx64" to "
		 "%.16"PRIx64, ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa),
		 be64toh(spi), be64toh(new_spi));

	/* update both the SA's own ID and the manager's lookup copy */
	ike_sa_id->set_initiator_spi(ike_sa_id, new_spi);
	entry->ike_sa_id->replace_values(entry->ike_sa_id, ike_sa_id);

	entry->condvar->signal(entry->condvar);
	unlock_single_segment(this, segment);
	return TRUE;
}
1655
1656 CALLBACK(enumerator_filter_wait, bool,
1657 private_ike_sa_manager_t *this, enumerator_t *orig, va_list args)
1658 {
1659 entry_t *entry;
1660 u_int segment;
1661 ike_sa_t **out;
1662
1663 VA_ARGS_VGET(args, out);
1664
1665 while (orig->enumerate(orig, &entry, &segment))
1666 {
1667 if (wait_for_entry(this, entry, segment))
1668 {
1669 *out = entry->ike_sa;
1670 charon->bus->set_sa(charon->bus, *out);
1671 return TRUE;
1672 }
1673 }
1674 return FALSE;
1675 }
1676
1677 CALLBACK(enumerator_filter_skip, bool,
1678 private_ike_sa_manager_t *this, enumerator_t *orig, va_list args)
1679 {
1680 entry_t *entry;
1681 u_int segment;
1682 ike_sa_t **out;
1683
1684 VA_ARGS_VGET(args, out);
1685
1686 while (orig->enumerate(orig, &entry, &segment))
1687 {
1688 if (!entry->driveout_new_threads &&
1689 !entry->driveout_waiting_threads &&
1690 !entry->checked_out)
1691 {
1692 *out = entry->ike_sa;
1693 charon->bus->set_sa(charon->bus, *out);
1694 return TRUE;
1695 }
1696 }
1697 return FALSE;
1698 }
1699
/*
 * Destructor callback for create_enumerator(): clear the thread's current
 * IKE_SA on the bus once enumeration ends.
 */
CALLBACK(reset_sa, void,
	void *data)
{
	charon->bus->set_sa(charon->bus, NULL);
}
1705
1706 METHOD(ike_sa_manager_t, create_enumerator, enumerator_t*,
1707 private_ike_sa_manager_t* this, bool wait)
1708 {
1709 return enumerator_create_filter(create_table_enumerator(this),
1710 wait ? (void*)enumerator_filter_wait : (void*)enumerator_filter_skip,
1711 this, reset_sa);
1712 }
1713
/*
 * Check an IKE_SA back into the manager, updating half-open and
 * connected-peers bookkeeping, and register SAs not yet known to the
 * manager (e.g. those from checkout_new()).
 */
METHOD(ike_sa_manager_t, checkin, void,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
{
	/* to check the SA back in, we look for the pointer of the ike_sa
	 * in all entries.
	 * The lookup is done by initiator SPI, so even if the SPI has changed (e.g.
	 * on reception of a IKE_SA_INIT response) the lookup will work but
	 * updating of the SPI MAY be necessary...
	 */
	entry_t *entry;
	ike_sa_id_t *ike_sa_id;
	host_t *other;
	identification_t *my_id, *other_id;
	u_int segment;

	ike_sa_id = ike_sa->get_id(ike_sa);
	my_id = ike_sa->get_my_id(ike_sa);
	other_id = ike_sa->get_other_eap_id(ike_sa);
	other = ike_sa->get_other_host(ike_sa);

	DBG2(DBG_MGR, "checkin IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
		 ike_sa->get_unique_id(ike_sa));

	/* look for the entry (on SUCCESS the segment stays locked) */
	if (get_entry_by_sa(this, ike_sa_id, ike_sa, &entry, &segment) == SUCCESS)
	{
		/* ike_sa_id must be updated */
		entry->ike_sa_id->replace_values(entry->ike_sa_id, ike_sa->get_id(ike_sa));
		/* signal waiting threads */
		entry->checked_out = NULL;
		/* -1 = no message currently being processed */
		entry->processing = -1;
		/* check if this SA is half-open */
		if (entry->half_open && ike_sa->get_state(ike_sa) != IKE_CONNECTING)
		{
			/* not half open anymore */
			entry->half_open = FALSE;
			remove_half_open(this, entry);
		}
		else if (entry->half_open && !other->ip_equals(other, entry->other))
		{
			/* the other host's IP has changed, we must update the hash table */
			remove_half_open(this, entry);
			DESTROY_IF(entry->other);
			entry->other = other->clone(other);
			put_half_open(this, entry);
		}
		else if (!entry->half_open &&
				 ike_sa->get_state(ike_sa) == IKE_CONNECTING)
		{
			/* this is a new half-open SA */
			entry->half_open = TRUE;
			entry->other = other->clone(other);
			put_half_open(this, entry);
		}
		entry->condvar->signal(entry->condvar);
	}
	else
	{
		/* the SA is not yet known to the manager, register a new entry */
		entry = entry_create();
		entry->ike_sa_id = ike_sa_id->clone(ike_sa_id);
		entry->ike_sa = ike_sa;
		if (ike_sa->get_state(ike_sa) == IKE_CONNECTING)
		{
			entry->half_open = TRUE;
			entry->other = other->clone(other);
			put_half_open(this, entry);
		}
		/* put_entry() returns with the segment locked */
		segment = put_entry(this, entry);
	}
	DBG2(DBG_MGR, "checkin of IKE_SA successful");

	/* apply identities for duplicate test */
	if ((ike_sa->get_state(ike_sa) == IKE_ESTABLISHED ||
		 ike_sa->get_state(ike_sa) == IKE_PASSIVE) &&
		entry->my_id == NULL && entry->other_id == NULL)
	{
		if (ike_sa->get_version(ike_sa) == IKEV1)
		{
			/* If authenticated and received INITIAL_CONTACT,
			 * delete any existing IKE_SAs with that peer. */
			if (ike_sa->has_condition(ike_sa, COND_INIT_CONTACT_SEEN))
			{
				/* We can't hold the segment locked while checking the
				 * uniqueness as this could lead to deadlocks. We mark the
				 * entry as checked out while we release the lock so no other
				 * thread can acquire it. Since it is not yet in the list of
				 * connected peers that will not cause a deadlock as no other
				 * caller of check_unqiueness() will try to check out this SA */
				entry->checked_out = thread_current();
				unlock_single_segment(this, segment);

				this->public.check_uniqueness(&this->public, ike_sa, TRUE);
				ike_sa->set_condition(ike_sa, COND_INIT_CONTACT_SEEN, FALSE);

				/* The entry could have been modified in the mean time, e.g.
				 * because another SA was added/removed next to it or another
				 * thread is waiting, but it should still exist, so there is no
				 * need for a lookup via get_entry_by... */
				lock_single_segment(this, segment);
				entry->checked_out = NULL;
				/* We already signaled waiting threads above, we have to do that
				 * again after checking the SA out and back in again. */
				entry->condvar->signal(entry->condvar);
			}
		}

		/* record the identities so future check_uniqueness() calls find us */
		entry->my_id = my_id->clone(my_id);
		entry->other_id = other_id->clone(other_id);
		if (!entry->other)
		{
			entry->other = other->clone(other);
		}
		put_connected_peers(this, entry);
	}

	unlock_single_segment(this, segment);

	/* this thread no longer owns an SA */
	charon->bus->set_sa(charon->bus, NULL);
}
1833
/*
 * Check an IKE_SA back in and destroy it, after driving out any threads
 * still waiting for it and removing it from all lookup tables.
 */
METHOD(ike_sa_manager_t, checkin_and_destroy, void,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
{
	/* deletion is a bit complex, we must ensure that no thread is waiting for
	 * this SA.
	 * We take this SA from the table, and start signaling while threads
	 * are in the condvar.
	 */
	entry_t *entry;
	ike_sa_id_t *ike_sa_id;
	u_int segment;

	ike_sa_id = ike_sa->get_id(ike_sa);

	DBG2(DBG_MGR, "checkin and destroy IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
		 ike_sa->get_unique_id(ike_sa));

	/* on SUCCESS the segment is locked until we unlock it below */
	if (get_entry_by_sa(this, ike_sa_id, ike_sa, &entry, &segment) == SUCCESS)
	{
		if (entry->driveout_waiting_threads && entry->driveout_new_threads)
		{	/* it looks like flush() has been called and the SA is being deleted
			 * anyway, just check it in */
			DBG2(DBG_MGR, "ignored checkin and destroy of IKE_SA during shutdown");
			entry->checked_out = NULL;
			entry->condvar->broadcast(entry->condvar);
			unlock_single_segment(this, segment);
			return;
		}

		/* drive out waiting threads, as we are in hurry */
		entry->driveout_waiting_threads = TRUE;
		/* mark it, so no new threads can get this entry */
		entry->driveout_new_threads = TRUE;
		/* wait until all workers have done their work */
		while (entry->waiting_threads)
		{
			/* wake up all */
			entry->condvar->broadcast(entry->condvar);
			/* they will wake us again when their work is done */
			entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
		}
		/* no one is waiting anymore, safe to unlink and tear down */
		remove_entry(this, entry);
		unlock_single_segment(this, segment);

		/* clean up the auxiliary lookup tables */
		if (entry->half_open)
		{
			remove_half_open(this, entry);
		}
		if (entry->my_id && entry->other_id)
		{
			remove_connected_peers(this, entry);
		}
		if (entry->init_hash.ptr)
		{
			remove_init_hash(this, entry->init_hash);
		}

		/* destroys the IKE_SA along with the entry */
		entry_destroy(entry);

		DBG2(DBG_MGR, "checkin and destroy of IKE_SA successful");
	}
	else
	{
		DBG1(DBG_MGR, "tried to checkin and delete nonexisting IKE_SA");
		ike_sa->destroy(ike_sa);
	}
	charon->bus->set_sa(charon->bus, NULL);
}
1902
/**
 * Cleanup function for create_id_enumerator: destroys the cloned ID list
 * along with all contained ike_sa_id_t objects.
 */
static void id_enumerator_cleanup(linked_list_t *ids)
{
	ids->destroy_offset(ids, offsetof(ike_sa_id_t, destroy));
}
1910
/*
 * Enumerate the IKE_SA IDs of all SAs established between the given
 * identities (and address family).  The IDs are cloned under the read lock,
 * so the enumerator stays valid after the table changes.
 */
METHOD(ike_sa_manager_t, create_id_enumerator, enumerator_t*,
	private_ike_sa_manager_t *this, identification_t *me,
	identification_t *other, int family)
{
	table_item_t *item;
	u_int row, segment;
	rwlock_t *lock;
	linked_list_t *ids = NULL;

	/* hash both identities to find the connected_peers row/segment */
	row = chunk_hash_inc(other->get_encoding(other),
						 chunk_hash(me->get_encoding(me))) & this->table_mask;
	segment = row & this->segment_mask;

	lock = this->connected_peers_segments[segment].lock;
	lock->read_lock(lock);
	item = this->connected_peers_table[row];
	while (item)
	{
		connected_peers_t *current = item->value;

		if (connected_peers_match(current, me, other, family))
		{
			/* deep-copy the SA ID list so we can release the lock */
			ids = current->sas->clone_offset(current->sas,
											 offsetof(ike_sa_id_t, clone));
			break;
		}
		item = item->next;
	}
	lock->unlock(lock);

	if (!ids)
	{
		return enumerator_create_empty();
	}
	/* cleanup destroys the cloned list and its IDs when enumeration ends */
	return enumerator_create_cleaner(ids->create_enumerator(ids),
									 (void*)id_enumerator_cleanup, ids);
}
1948
/**
 * Move all CHILD_SAs and virtual IPs from old to new
 *
 * Used during IKEv1 reauthentication, where the new IKE_SA implicitly takes
 * over the old SA's children.  Bus events are raised so listeners can track
 * the migration; the ordering of the bus calls below is deliberate.
 */
static void adopt_children_and_vips(ike_sa_t *old, ike_sa_t *new)
{
	enumerator_t *enumerator;
	child_sa_t *child_sa;
	host_t *vip;
	int chcount = 0, vipcount = 0;

	/* announce where the children are migrating to */
	charon->bus->children_migrate(charon->bus, new->get_id(new),
								  new->get_unique_id(new));
	enumerator = old->create_child_sa_enumerator(old);
	while (enumerator->enumerate(enumerator, &child_sa))
	{
		old->remove_child_sa(old, enumerator);
		new->add_child_sa(new, child_sa);
		chcount++;
	}
	enumerator->destroy(enumerator);

	/* also migrate queued tasks that would create CHILD_SAs */
	new->adopt_child_tasks(new, old);

	enumerator = old->create_virtual_ip_enumerator(old, FALSE);
	while (enumerator->enumerate(enumerator, &vip))
	{
		new->add_virtual_ip(new, FALSE, vip);
		vipcount++;
	}
	enumerator->destroy(enumerator);
	/* this does not release the addresses, which is good, but it does trigger
	 * an assign_vips(FALSE) event... */
	old->clear_virtual_ips(old, FALSE);
	/* ...trigger the analogous event on the new SA */
	charon->bus->set_sa(charon->bus, new);
	charon->bus->assign_vips(charon->bus, new, TRUE);
	/* NULL target signals the end of the migration */
	charon->bus->children_migrate(charon->bus, NULL, 0);
	charon->bus->set_sa(charon->bus, old);

	if (chcount || vipcount)
	{
		DBG1(DBG_IKE, "detected reauth of existing IKE_SA, adopting %d "
			 "children and %d virtual IPs", chcount, vipcount);
	}
}
1994
/**
 * Delete an existing IKE_SA due to a unique replace policy
 *
 * @param duplicate		existing IKE_SA to replace
 * @param new			newly established IKE_SA (checked out by this thread)
 * @param other			remote identity, for logging
 * @param host			remote host of the new SA
 * @return				SUCCESS, or the result of duplicate->delete()
 */
static status_t enforce_replace(private_ike_sa_manager_t *this,
								ike_sa_t *duplicate, ike_sa_t *new,
								identification_t *other, host_t *host)
{
	charon->bus->alert(charon->bus, ALERT_UNIQUE_REPLACE);

	if (host->equals(host, duplicate->get_other_host(duplicate)))
	{
		/* looks like a reauthentication attempt */
		if (!new->has_condition(new, COND_INIT_CONTACT_SEEN) &&
			new->get_version(new) == IKEV1)
		{
			/* IKEv1 implicitly takes over children, IKEv2 recreates them
			 * explicitly. */
			adopt_children_and_vips(duplicate, new);
		}
		/* For IKEv1 we have to delay the delete for the old IKE_SA. Some
		 * peers need to complete the new SA first, otherwise the quick modes
		 * might get lost. For IKEv2 we do the same, as we want overlapping
		 * CHILD_SAs to keep connectivity up. */
		lib->scheduler->schedule_job(lib->scheduler, (job_t*)
			delete_ike_sa_job_create(duplicate->get_id(duplicate), TRUE), 10);
		DBG1(DBG_IKE, "schedule delete of duplicate IKE_SA for peer '%Y' due "
			 "to uniqueness policy and suspected reauthentication", other);
		return SUCCESS;
	}
	/* different remote host: delete the old SA right away */
	DBG1(DBG_IKE, "deleting duplicate IKE_SA for peer '%Y' due to "
		 "uniqueness policy", other);
	return duplicate->delete(duplicate, FALSE);
}
2028
/*
 * Enforce the peer config's uniqueness policy against all existing IKE_SAs
 * with the same identities.  Returns TRUE if the new ike_sa should be
 * cancelled (UNIQUE_KEEP with an existing SA from a different host).
 */
METHOD(ike_sa_manager_t, check_uniqueness, bool,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa, bool force_replace)
{
	bool cancel = FALSE;
	peer_cfg_t *peer_cfg;
	unique_policy_t policy;
	enumerator_t *enumerator;
	ike_sa_id_t *id = NULL;
	identification_t *me, *other;
	host_t *other_host;

	peer_cfg = ike_sa->get_peer_cfg(ike_sa);
	policy = peer_cfg->get_unique_policy(peer_cfg);
	if (policy == UNIQUE_NEVER || (policy == UNIQUE_NO && !force_replace))
	{	/* nothing to enforce */
		return FALSE;
	}
	me = ike_sa->get_my_id(ike_sa);
	other = ike_sa->get_other_eap_id(ike_sa);
	other_host = ike_sa->get_other_host(ike_sa);

	/* iterate over cloned SA IDs, so we can check out each candidate */
	enumerator = create_id_enumerator(this, me, other,
									  other_host->get_family(other_host));
	while (enumerator->enumerate(enumerator, &id))
	{
		status_t status = SUCCESS;
		ike_sa_t *duplicate;

		duplicate = checkout(this, id);
		if (!duplicate)
		{	/* gone in the meantime */
			continue;
		}
		if (force_replace)
		{
			DBG1(DBG_IKE, "destroying duplicate IKE_SA for peer '%Y', "
				 "received INITIAL_CONTACT", other);
			charon->bus->ike_updown(charon->bus, duplicate, FALSE);
			checkin_and_destroy(this, duplicate);
			continue;
		}
		/* only act on duplicates that use the same peer config */
		peer_cfg = duplicate->get_peer_cfg(duplicate);
		if (peer_cfg && peer_cfg->equals(peer_cfg, ike_sa->get_peer_cfg(ike_sa)))
		{
			switch (duplicate->get_state(duplicate))
			{
				case IKE_ESTABLISHED:
				case IKE_REKEYING:
					switch (policy)
					{
						case UNIQUE_REPLACE:
							status = enforce_replace(this, duplicate, ike_sa,
													 other, other_host);
							break;
						case UNIQUE_KEEP:
							/* potential reauthentication? */
							if (!other_host->equals(other_host,
											duplicate->get_other_host(duplicate)))
							{
								cancel = TRUE;
								/* we keep the first IKE_SA and delete all
								 * other duplicates that might exist */
								policy = UNIQUE_REPLACE;
							}
							break;
						default:
							break;
					}
					break;
				default:
					break;
			}
		}
		if (status == DESTROY_ME)
		{
			checkin_and_destroy(this, duplicate);
		}
		else
		{
			checkin(this, duplicate);
		}
	}
	enumerator->destroy(enumerator);
	/* reset thread's current IKE_SA after checkin */
	charon->bus->set_sa(charon->bus, ike_sa);
	return cancel;
}
2116
2117 METHOD(ike_sa_manager_t, has_contact, bool,
2118 private_ike_sa_manager_t *this, identification_t *me,
2119 identification_t *other, int family)
2120 {
2121 table_item_t *item;
2122 u_int row, segment;
2123 rwlock_t *lock;
2124 bool found = FALSE;
2125
2126 row = chunk_hash_inc(other->get_encoding(other),
2127 chunk_hash(me->get_encoding(me))) & this->table_mask;
2128 segment = row & this->segment_mask;
2129 lock = this->connected_peers_segments[segment].lock;
2130 lock->read_lock(lock);
2131 item = this->connected_peers_table[row];
2132 while (item)
2133 {
2134 if (connected_peers_match(item->value, me, other, family))
2135 {
2136 found = TRUE;
2137 break;
2138 }
2139 item = item->next;
2140 }
2141 lock->unlock(lock);
2142
2143 return found;
2144 }
2145
/*
 * Return the total number of registered IKE_SAs (lock-free counter read).
 */
METHOD(ike_sa_manager_t, get_count, u_int,
	private_ike_sa_manager_t *this)
{
	return (u_int)ref_cur(&this->total_sa_count);
}
2151
/*
 * Return the number of half-open IKE_SAs, either globally (ip == NULL) or
 * for a specific remote address.  With responder_only, count only SAs we
 * are responding to (relevant for DoS protection).
 */
METHOD(ike_sa_manager_t, get_half_open_count, u_int,
	private_ike_sa_manager_t *this, host_t *ip, bool responder_only)
{
	table_item_t *item;
	u_int row, segment;
	rwlock_t *lock;
	chunk_t addr;
	u_int count = 0;

	if (ip)
	{
		/* per-address counters live in the half_open hash table */
		addr = ip->get_address(ip);
		row = chunk_hash(addr) & this->table_mask;
		segment = row & this->segment_mask;
		lock = this->half_open_segments[segment].lock;
		lock->read_lock(lock);
		item = this->half_open_table[row];
		while (item)
		{
			half_open_t *half_open = item->value;

			if (chunk_equals(addr, half_open->other))
			{
				count = responder_only ? half_open->count_responder
									   : half_open->count;
				break;
			}
			item = item->next;
		}
		lock->unlock(lock);
	}
	else
	{
		/* global counters are maintained lock-free */
		count = responder_only ? (u_int)ref_cur(&this->half_open_count_responder)
							   : (u_int)ref_cur(&this->half_open_count);
	}
	return count;
}
2190
/*
 * Install a callback used to generate/label SPIs (e.g. for HA setups).
 * Guarded by the SPI write lock against concurrent get_spi() calls.
 */
METHOD(ike_sa_manager_t, set_spi_cb, void,
	private_ike_sa_manager_t *this, spi_cb_t callback, void *data)
{
	this->spi_lock->write_lock(this->spi_lock);
	this->spi_cb.cb = callback;
	this->spi_cb.data = data;
	this->spi_lock->unlock(this->spi_lock);
}
2199
/**
 * Destroy all entries
 *
 * Caller must hold all segment locks.  Each entry is unlinked from the main
 * table and all auxiliary tables before being destroyed together with its
 * IKE_SA.
 */
static void destroy_all_entries(private_ike_sa_manager_t *this)
{
	enumerator_t *enumerator;
	entry_t *entry;
	u_int segment;

	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		/* publish the SA so bus listeners see which SA is being torn down */
		charon->bus->set_sa(charon->bus, entry->ike_sa);
		if (entry->half_open)
		{
			remove_half_open(this, entry);
		}
		if (entry->my_id && entry->other_id)
		{
			remove_connected_peers(this, entry);
		}
		if (entry->init_hash.ptr)
		{
			remove_init_hash(this, entry->init_hash);
		}
		/* safe removal while enumerating, provided by the table enumerator */
		remove_entry_at((private_enumerator_t*)enumerator);
		entry_destroy(entry);
	}
	enumerator->destroy(enumerator);
	charon->bus->set_sa(charon->bus, NULL);
}
2231
/*
 * Shut the manager down: drive out all threads, delete all IKE_SAs and
 * destroy their entries.  Runs as a four-step protocol under all segment
 * locks; afterwards no new SPIs are handed out.
 */
METHOD(ike_sa_manager_t, flush, void,
	private_ike_sa_manager_t *this)
{
	enumerator_t *enumerator;
	entry_t *entry;
	u_int segment;

	lock_all_segments(this);
	DBG2(DBG_MGR, "going to destroy IKE_SA manager and all managed IKE_SA's");
	/* Step 1: drive out all waiting threads */
	DBG2(DBG_MGR, "set driveout flags for all stored IKE_SA's");
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		/* do not accept new threads, drive out waiting threads */
		entry->driveout_new_threads = TRUE;
		entry->driveout_waiting_threads = TRUE;
	}
	enumerator->destroy(enumerator);
	DBG2(DBG_MGR, "wait for all threads to leave IKE_SA's");
	/* Step 2: wait until all are gone */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		while (entry->waiting_threads || entry->checked_out)
		{
			/* wake up all */
			entry->condvar->broadcast(entry->condvar);
			/* go sleeping until they are gone */
			entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
		}
	}
	enumerator->destroy(enumerator);
	DBG2(DBG_MGR, "delete all IKE_SA's");
	/* Step 3: initiate deletion of all IKE_SAs */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		charon->bus->set_sa(charon->bus, entry->ike_sa);
		/* TRUE = force delete without waiting for a response */
		entry->ike_sa->delete(entry->ike_sa, TRUE);
	}
	enumerator->destroy(enumerator);

	DBG2(DBG_MGR, "destroy all entries");
	/* Step 4: destroy all entries */
	destroy_all_entries(this);
	unlock_all_segments(this);

	/* disable SPI generation so no new SAs can be created after flush */
	this->spi_lock->write_lock(this->spi_lock);
	DESTROY_IF(this->rng);
	this->rng = NULL;
	this->spi_cb.cb = NULL;
	this->spi_cb.data = NULL;
	this->spi_lock->unlock(this->spi_lock);
}
2287
/*
 * Destroy the manager: release remaining entries, all hash tables, their
 * per-segment locks and the manager itself.  Expects flush() to have been
 * called before.
 */
METHOD(ike_sa_manager_t, destroy, void,
	private_ike_sa_manager_t *this)
{
	u_int i;

	/* in case new SAs were checked in after flush() was called */
	lock_all_segments(this);
	destroy_all_entries(this);
	unlock_all_segments(this);

	free(this->ike_sa_table);
	free(this->half_open_table);
	free(this->connected_peers_table);
	free(this->init_hashes_table);
	for (i = 0; i < this->segment_count; i++)
	{
		this->segments[i].mutex->destroy(this->segments[i].mutex);
		this->half_open_segments[i].lock->destroy(this->half_open_segments[i].lock);
		this->connected_peers_segments[i].lock->destroy(this->connected_peers_segments[i].lock);
		this->init_hashes_segments[i].mutex->destroy(this->init_hashes_segments[i].mutex);
	}
	free(this->segments);
	free(this->half_open_segments);
	free(this->connected_peers_segments);
	free(this->init_hashes_segments);

	this->spi_lock->destroy(this->spi_lock);
	free(this);
}
2317
2318 /**
2319 * This function returns the next-highest power of two for the given number.
2320 * The algorithm works by setting all bits on the right-hand side of the most
2321 * significant 1 to 1 and then increments the whole number so it rolls over
2322 * to the nearest power of two. Note: returns 0 for n == 0
2323 */
2324 static u_int get_nearest_powerof2(u_int n)
2325 {
2326 u_int i;
2327
2328 --n;
2329 for (i = 1; i < sizeof(u_int) * 8; i <<= 1)
2330 {
2331 n |= n >> i;
2332 }
2333 return ++n;
2334 }
2335
2336 /*
2337 * Described in header.
2338 */
/*
 * Described in header.
 */
ike_sa_manager_t *ike_sa_manager_create()
{
	private_ike_sa_manager_t *this;
	u_int i;

	/* wire up the public interface to the private METHOD implementations */
	INIT(this,
		.public = {
			.checkout = _checkout,
			.checkout_new = _checkout_new,
			.checkout_by_message = _checkout_by_message,
			.checkout_by_config = _checkout_by_config,
			.checkout_by_id = _checkout_by_id,
			.checkout_by_name = _checkout_by_name,
			.new_initiator_spi = _new_initiator_spi,
			.check_uniqueness = _check_uniqueness,
			.has_contact = _has_contact,
			.create_enumerator = _create_enumerator,
			.create_id_enumerator = _create_id_enumerator,
			.checkin = _checkin,
			.checkin_and_destroy = _checkin_and_destroy,
			.get_count = _get_count,
			.get_half_open_count = _get_half_open_count,
			.flush = _flush,
			.set_spi_cb = _set_spi_cb,
			.destroy = _destroy,
		},
	);

	/* RNG_WEAK is sufficient here; without any RNG the manager cannot
	 * operate, so fail construction entirely */
	this->rng = lib->crypto->create_rng(lib->crypto, RNG_WEAK);
	if (this->rng == NULL)
	{
		DBG1(DBG_MGR, "manager initialization failed, no RNG supported");
		free(this);
		return NULL;
	}
	this->spi_lock = rwlock_create(RWLOCK_TYPE_DEFAULT);

	/* 0 means no limit on the number of IKE_SAs */
	this->ikesa_limit = lib->settings->get_int(lib->settings,
											   "%s.ikesa_limit", 0, lib->ns);

	/* hash table size is rounded up to a power of two and clamped to
	 * [1, MAX_HASHTABLE_SIZE]; the max(1, ...) also guards against
	 * get_nearest_powerof2() returning 0 for a 0 setting */
	this->table_size = get_nearest_powerof2(lib->settings->get_int(
											lib->settings, "%s.ikesa_table_size",
											DEFAULT_HASHTABLE_SIZE, lib->ns));
	this->table_size = max(1, min(this->table_size, MAX_HASHTABLE_SIZE));
	/* power-of-two size lets us mask instead of modulo when hashing */
	this->table_mask = this->table_size - 1;

	/* never more segments than table buckets */
	this->segment_count = get_nearest_powerof2(lib->settings->get_int(
											lib->settings, "%s.ikesa_table_segments",
											DEFAULT_SEGMENT_COUNT, lib->ns));
	this->segment_count = max(1, min(this->segment_count, this->table_size));
	this->segment_mask = this->segment_count - 1;

	this->ike_sa_table = calloc(this->table_size, sizeof(table_item_t*));
	this->segments = (segment_t*)calloc(this->segment_count, sizeof(segment_t));
	/* recursive mutexes: see flush(), which waits on an entry condvar
	 * while already holding all segment mutexes */
	for (i = 0; i < this->segment_count; i++)
	{
		this->segments[i].mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
	}

	/* we use the same table parameters for the table to track half-open SAs */
	this->half_open_table = calloc(this->table_size, sizeof(table_item_t*));
	this->half_open_segments = calloc(this->segment_count, sizeof(shareable_segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->half_open_segments[i].lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
	}

	/* also for the hash table used for duplicate tests */
	this->connected_peers_table = calloc(this->table_size, sizeof(table_item_t*));
	this->connected_peers_segments = calloc(this->segment_count, sizeof(shareable_segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->connected_peers_segments[i].lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
	}

	/* and again for the table of hashes of seen initial IKE messages */
	this->init_hashes_table = calloc(this->table_size, sizeof(table_item_t*));
	this->init_hashes_segments = calloc(this->segment_count, sizeof(segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->init_hashes_segments[i].mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
	}

	this->reuse_ikesa = lib->settings->get_bool(lib->settings,
												"%s.reuse_ikesa", TRUE, lib->ns);
	return &this->public;
}