1 /*
2 * Copyright (C) 2005-2011 Martin Willi
3 * Copyright (C) 2011 revosec AG
4 * Copyright (C) 2008-2012 Tobias Brunner
5 * Copyright (C) 2005 Jan Hutter
6 * Hochschule fuer Technik Rapperswil
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 * for more details.
17 */
18
19 #include <string.h>
20
21 #include "ike_sa_manager.h"
22
23 #include <daemon.h>
24 #include <sa/ike_sa_id.h>
25 #include <bus/bus.h>
26 #include <threading/condvar.h>
27 #include <threading/mutex.h>
28 #include <threading/rwlock.h>
29 #include <collections/linked_list.h>
30 #include <crypto/hashers/hasher.h>
31
32 /* the default size of the hash table (MUST be a power of 2) */
33 #define DEFAULT_HASHTABLE_SIZE 1
34
35 /* the maximum size of the hash table (MUST be a power of 2) */
36 #define MAX_HASHTABLE_SIZE (1 << 30)
37
38 /* the default number of segments (MUST be a power of 2) */
39 #define DEFAULT_SEGMENT_COUNT 1
40
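/*
 * Illustrative note (example values only, not taken from the defaults above):
 * both sizes are rounded up to powers of two in ike_sa_manager_create(), so a
 * bucket and the segment lock protecting it can be selected with plain bit
 * masks. With table_size = 8 and segment_count = 4 this yields table_mask = 7
 * and segment_mask = 3, and a lookup does
 *
 *   row     = hash & this->table_mask;    // bucket within the table
 *   segment = row  & this->segment_mask;  // lock guarding that bucket
 *
 * as implemented in put_entry() and lock_single_segment() below.
 */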
41 typedef struct entry_t entry_t;
42
43 /**
44 * An entry in the linked list, contains IKE_SA, locking and lookup data.
45 */
46 struct entry_t {
47
48 /**
49 * Number of threads waiting for this ike_sa_t object.
50 */
51 int waiting_threads;
52
53 /**
54 * Condvar where threads can wait until ike_sa_t object is free for use again.
55 */
56 condvar_t *condvar;
57
58 /**
59 * Is this ike_sa currently checked out?
60 */
61 bool checked_out;
62
63 /**
64 * Does this SA drive out new threads?
65 */
66 bool driveout_new_threads;
67
68 /**
69 * Does this SA drive out waiting threads?
70 */
71 bool driveout_waiting_threads;
72
73 /**
74 * Identification of an IKE_SA (SPIs).
75 */
76 ike_sa_id_t *ike_sa_id;
77
78 /**
79 * The contained ike_sa_t object.
80 */
81 ike_sa_t *ike_sa;
82
83 /**
84 * hash of the IKE_SA_INIT message, used to detect retransmissions
85 */
86 chunk_t init_hash;
87
88 /**
89 * remote host address, required for DoS detection and duplicate
90 * checking (host with same my_id and other_id is *not* considered
91 * a duplicate if the address family differs)
92 */
93 host_t *other;
94
95 /**
96 * As responder: Is this SA half-open?
97 */
98 bool half_open;
99
100 /**
101 * own identity, required for duplicate checking
102 */
103 identification_t *my_id;
104
105 /**
106 * remote identity, required for duplicate checking
107 */
108 identification_t *other_id;
109
110 /**
111 * message ID or hash of the message currently being processed, -1 if none
112 */
113 u_int32_t processing;
114 };
115
116 /**
117 * Implementation of entry_t.destroy.
118 */
119 static status_t entry_destroy(entry_t *this)
120 {
121 /* also destroy IKE SA */
122 this->ike_sa->destroy(this->ike_sa);
123 this->ike_sa_id->destroy(this->ike_sa_id);
124 chunk_free(&this->init_hash);
125 DESTROY_IF(this->other);
126 DESTROY_IF(this->my_id);
127 DESTROY_IF(this->other_id);
128 this->condvar->destroy(this->condvar);
129 free(this);
130 return SUCCESS;
131 }
132
133 /**
134 * Creates a new entry for the ike_sa_t list.
135 */
136 static entry_t *entry_create()
137 {
138 entry_t *this;
139
140 INIT(this,
141 .condvar = condvar_create(CONDVAR_TYPE_DEFAULT),
142 .processing = -1,
143 );
144
145 return this;
146 }
147
148 /**
149 * Function that matches entry_t objects by ike_sa_id_t.
150 */
151 static bool entry_match_by_id(entry_t *entry, ike_sa_id_t *id)
152 {
153 if (id->equals(id, entry->ike_sa_id))
154 {
155 return TRUE;
156 }
157 if ((id->get_responder_spi(id) == 0 ||
158 entry->ike_sa_id->get_responder_spi(entry->ike_sa_id) == 0) &&
159 id->get_initiator_spi(id) == entry->ike_sa_id->get_initiator_spi(entry->ike_sa_id))
160 {
161 /* this is TRUE for IKE_SAs that we initiated but have not yet received a response */
162 return TRUE;
163 }
164 return FALSE;
165 }
166
167 /**
168 * Function that matches entry_t objects by ike_sa_t pointers.
169 */
170 static bool entry_match_by_sa(entry_t *entry, ike_sa_t *ike_sa)
171 {
172 return entry->ike_sa == ike_sa;
173 }
174
175 /**
176 * Hash function for ike_sa_id_t objects.
177 */
178 static u_int ike_sa_id_hash(ike_sa_id_t *ike_sa_id)
179 {
180 /* IKEv2 does not mandate random SPIs (RFC 5996, 2.6), they just have to be
181 * locally unique, so we use our randomly allocated SPI whether we are
182 * initiator or responder to ensure a good distribution. The latter is not
183 * possible for IKEv1 as we don't know whether we are original initiator or
184 * not (based on the IKE header). But as RFC 2408, section 2.5.3 proposes
185 * SPIs (Cookies) to be allocated near random (we allocate them randomly
186 * anyway) it seems safe to always use the initiator SPI. */
187 if (ike_sa_id->get_ike_version(ike_sa_id) == IKEV1_MAJOR_VERSION ||
188 ike_sa_id->is_initiator(ike_sa_id))
189 {
190 return ike_sa_id->get_initiator_spi(ike_sa_id);
191 }
192 return ike_sa_id->get_responder_spi(ike_sa_id);
193 }
194
195 typedef struct half_open_t half_open_t;
196
197 /**
198 * Struct to manage half-open IKE_SAs per peer.
199 */
200 struct half_open_t {
201 /** chunk of remote host address */
202 chunk_t other;
203
204 /** the number of half-open IKE_SAs with that host */
205 u_int count;
206 };
207
208 /**
209 * Destroys a half_open_t object.
210 */
211 static void half_open_destroy(half_open_t *this)
212 {
213 chunk_free(&this->other);
214 free(this);
215 }
216
217 typedef struct connected_peers_t connected_peers_t;
218
219 struct connected_peers_t {
220 /** own identity */
221 identification_t *my_id;
222
223 /** remote identity */
224 identification_t *other_id;
225
226 /** ip address family of peer */
227 int family;
228
229 /** list of ike_sa_id_t objects of IKE_SAs between the two identities */
230 linked_list_t *sas;
231 };
232
233 static void connected_peers_destroy(connected_peers_t *this)
234 {
235 this->my_id->destroy(this->my_id);
236 this->other_id->destroy(this->other_id);
237 this->sas->destroy(this->sas);
238 free(this);
239 }
240
241 /**
242 * Function that matches connected_peers_t objects by the given ids.
243 */
244 static inline bool connected_peers_match(connected_peers_t *connected_peers,
245 identification_t *my_id, identification_t *other_id,
246 int family)
247 {
248 return my_id->equals(my_id, connected_peers->my_id) &&
249 other_id->equals(other_id, connected_peers->other_id) &&
250 (!family || family == connected_peers->family);
251 }
252
253 typedef struct init_hash_t init_hash_t;
254
255 struct init_hash_t {
256 /** hash of IKE_SA_INIT or initial phase1 message (data is not cloned) */
257 chunk_t hash;
258
259 /** our SPI allocated for the IKE_SA based on this message */
260 u_int64_t our_spi;
261 };
262
263 typedef struct segment_t segment_t;
264
265 /**
266 * Struct to manage segments of the hash table.
267 */
268 struct segment_t {
269 /** mutex to access a segment exclusively */
270 mutex_t *mutex;
271
272 /** the number of entries in this segment */
273 u_int count;
274 };
275
276 typedef struct shareable_segment_t shareable_segment_t;
277
278 /**
279 * Struct to manage segments of the "half-open" and "connected peers" hash tables.
280 */
281 struct shareable_segment_t {
282 /** rwlock to access a segment non-/exclusively */
283 rwlock_t *lock;
284
285 /** the number of entries in this segment - in case of the "half-open table"
286 * it's the sum of all half_open_t.count in a segment. */
287 u_int count;
288 };
289
290 typedef struct table_item_t table_item_t;
291
292 /**
293 * Instead of using linked_list_t for each bucket we store the data in our own
294 * list to save memory.
295 */
296 struct table_item_t {
297 /** data of this item */
298 void *value;
299
300 /** next item in the overflow list */
301 table_item_t *next;
302 };
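/*
 * Sketch of the resulting bucket layout (mirrors put_entry()/remove_entry()
 * below): each table row holds a singly linked overflow list, new items are
 * inserted at the head, and lookups simply walk the list:
 *
 *   for (item = table[row]; item; item = item->next)
 *   {
 *       if (item->value == looked_up_value)
 *       {
 *           break;
 *       }
 *   }
 */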
303
304 typedef struct private_ike_sa_manager_t private_ike_sa_manager_t;
305
306 /**
307 * Additional private members of ike_sa_manager_t.
308 */
309 struct private_ike_sa_manager_t {
310 /**
311 * Public interface of ike_sa_manager_t.
312 */
313 ike_sa_manager_t public;
314
315 /**
316 * Hash table with entries for the ike_sa_t objects.
317 */
318 table_item_t **ike_sa_table;
319
320 /**
321 * The size of the hash table.
322 */
323 u_int table_size;
324
325 /**
326 * Mask to map the hashes to table rows.
327 */
328 u_int table_mask;
329
330 /**
331 * Segments of the hash table.
332 */
333 segment_t *segments;
334
335 /**
336 * The number of segments.
337 */
338 u_int segment_count;
339
340 /**
341 * Mask to map a table row to a segment.
342 */
343 u_int segment_mask;
344
345 /**
346 * Hash table with half_open_t objects.
347 */
348 table_item_t **half_open_table;
349
350 /**
351 * Segments of the "half-open" hash table.
352 */
353 shareable_segment_t *half_open_segments;
354
355 /**
356 * Hash table with connected_peers_t objects.
357 */
358 table_item_t **connected_peers_table;
359
360 /**
361 * Segments of the "connected peers" hash table.
362 */
363 shareable_segment_t *connected_peers_segments;
364
365 /**
366 * Hash table with init_hash_t objects.
367 */
368 table_item_t **init_hashes_table;
369
370 /**
371 * Segments of the "hashes" hash table.
372 */
373 segment_t *init_hashes_segments;
374
375 /**
376 * RNG to get random SPIs for our side
377 */
378 rng_t *rng;
379
380 /**
381 * SHA1 hasher for IKE_SA_INIT retransmit detection
382 */
383 hasher_t *hasher;
384
385 /**
386 * reuse existing IKE_SAs in checkout_by_config
387 */
388 bool reuse_ikesa;
389
390 /**
391 * Configured IKE_SA limit, if any
392 */
393 u_int ikesa_limit;
394 };
395
396 /**
397 * Acquire a lock to access the segment of the table row with the given index.
398 * It also works with the segment index directly.
399 */
400 static inline void lock_single_segment(private_ike_sa_manager_t *this,
401 u_int index)
402 {
403 mutex_t *lock = this->segments[index & this->segment_mask].mutex;
404 lock->lock(lock);
405 }
406
407 /**
408 * Release the lock required to access the segment of the table row with the given index.
409 * It also works with the segment index directly.
410 */
411 static inline void unlock_single_segment(private_ike_sa_manager_t *this,
412 u_int index)
413 {
414 mutex_t *lock = this->segments[index & this->segment_mask].mutex;
415 lock->unlock(lock);
416 }
417
418 /**
419 * Lock all segments
420 */
421 static void lock_all_segments(private_ike_sa_manager_t *this)
422 {
423 u_int i;
424
425 for (i = 0; i < this->segment_count; i++)
426 {
427 this->segments[i].mutex->lock(this->segments[i].mutex);
428 }
429 }
430
431 /**
432 * Unlock all segments
433 */
434 static void unlock_all_segments(private_ike_sa_manager_t *this)
435 {
436 u_int i;
437
438 for (i = 0; i < this->segment_count; i++)
439 {
440 this->segments[i].mutex->unlock(this->segments[i].mutex);
441 }
442 }
443
444 typedef struct private_enumerator_t private_enumerator_t;
445
446 /**
447 * hash table enumerator implementation
448 */
449 struct private_enumerator_t {
450
451 /**
452 * implements enumerator interface
453 */
454 enumerator_t enumerator;
455
456 /**
457 * associated ike_sa_manager_t
458 */
459 private_ike_sa_manager_t *manager;
460
461 /**
462 * current segment index
463 */
464 u_int segment;
465
466 /**
467 * currently enumerating entry
468 */
469 entry_t *entry;
470
471 /**
472 * current table row index
473 */
474 u_int row;
475
476 /**
477 * current table item
478 */
479 table_item_t *current;
480
481 /**
482 * previous table item
483 */
484 table_item_t *prev;
485 };
486
487 METHOD(enumerator_t, enumerate, bool,
488 private_enumerator_t *this, entry_t **entry, u_int *segment)
489 {
490 if (this->entry)
491 {
492 this->entry->condvar->signal(this->entry->condvar);
493 this->entry = NULL;
494 }
495 while (this->segment < this->manager->segment_count)
496 {
497 while (this->row < this->manager->table_size)
498 {
499 this->prev = this->current;
500 if (this->current)
501 {
502 this->current = this->current->next;
503 }
504 else
505 {
506 lock_single_segment(this->manager, this->segment);
507 this->current = this->manager->ike_sa_table[this->row];
508 }
509 if (this->current)
510 {
511 *entry = this->entry = this->current->value;
512 *segment = this->segment;
513 return TRUE;
514 }
515 unlock_single_segment(this->manager, this->segment);
516 this->row += this->manager->segment_count;
517 }
518 this->segment++;
519 this->row = this->segment;
520 }
521 return FALSE;
522 }
523
524 METHOD(enumerator_t, enumerator_destroy, void,
525 private_enumerator_t *this)
526 {
527 if (this->entry)
528 {
529 this->entry->condvar->signal(this->entry->condvar);
530 }
531 if (this->current)
532 {
533 unlock_single_segment(this->manager, this->segment);
534 }
535 free(this);
536 }
537
538 /**
539 * Creates an enumerator to enumerate the entries in the hash table.
540 */
541 static enumerator_t* create_table_enumerator(private_ike_sa_manager_t *this)
542 {
543 private_enumerator_t *enumerator;
544
545 INIT(enumerator,
546 .enumerator = {
547 .enumerate = (void*)_enumerate,
548 .destroy = _enumerator_destroy,
549 },
550 .manager = this,
551 );
552 return &enumerator->enumerator;
553 }
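/*
 * Usage sketch (this mirrors checkout_by_config() and flush() further below):
 * the segment of the entry currently enumerated stays locked until the
 * enumerator advances past its bucket or is destroyed, so entries may be
 * inspected and modified safely while enumerating.
 *
 *   enumerator_t *enumerator = create_table_enumerator(this);
 *   while (enumerator->enumerate(enumerator, &entry, &segment))
 *   {
 *       if (wait_for_entry(this, entry, segment))
 *       {
 *           ...
 *       }
 *   }
 *   enumerator->destroy(enumerator);
 */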
554
555 /**
556 * Put an entry into the hash table.
557 * Note: The caller has to unlock the returned segment.
558 */
559 static u_int put_entry(private_ike_sa_manager_t *this, entry_t *entry)
560 {
561 table_item_t *current, *item;
562 u_int row, segment;
563
564 INIT(item,
565 .value = entry,
566 );
567
568 row = ike_sa_id_hash(entry->ike_sa_id) & this->table_mask;
569 segment = row & this->segment_mask;
570
571 lock_single_segment(this, segment);
572 current = this->ike_sa_table[row];
573 if (current)
574 { /* insert at the front of current bucket */
575 item->next = current;
576 }
577 this->ike_sa_table[row] = item;
578 this->segments[segment].count++;
579 return segment;
580 }
581
582 /**
583 * Remove an entry from the hash table.
584 * Note: The caller MUST have a lock on the segment of this entry.
585 */
586 static void remove_entry(private_ike_sa_manager_t *this, entry_t *entry)
587 {
588 table_item_t *item, *prev = NULL;
589 u_int row, segment;
590
591 row = ike_sa_id_hash(entry->ike_sa_id) & this->table_mask;
592 segment = row & this->segment_mask;
593 item = this->ike_sa_table[row];
594 while (item)
595 {
596 if (item->value == entry)
597 {
598 if (prev)
599 {
600 prev->next = item->next;
601 }
602 else
603 {
604 this->ike_sa_table[row] = item->next;
605 }
606 this->segments[segment].count--;
607 free(item);
608 break;
609 }
610 prev = item;
611 item = item->next;
612 }
613 }
614
615 /**
616 * Remove the entry at the current enumerator position.
617 */
618 static void remove_entry_at(private_enumerator_t *this)
619 {
620 this->entry = NULL;
621 if (this->current)
622 {
623 table_item_t *current = this->current;
624
625 this->manager->segments[this->segment].count--;
626 this->current = this->prev;
627
628 if (this->prev)
629 {
630 this->prev->next = current->next;
631 }
632 else
633 {
634 this->manager->ike_sa_table[this->row] = current->next;
635 unlock_single_segment(this->manager, this->segment);
636 }
637 free(current);
638 }
639 }
640
641 /**
642 * Find an entry using the provided match function to compare the entries for
643 * equality.
644 */
645 static status_t get_entry_by_match_function(private_ike_sa_manager_t *this,
646 ike_sa_id_t *ike_sa_id, entry_t **entry, u_int *segment,
647 linked_list_match_t match, void *param)
648 {
649 table_item_t *item;
650 u_int row, seg;
651
652 row = ike_sa_id_hash(ike_sa_id) & this->table_mask;
653 seg = row & this->segment_mask;
654
655 lock_single_segment(this, seg);
656 item = this->ike_sa_table[row];
657 while (item)
658 {
659 if (match(item->value, param))
660 {
661 *entry = item->value;
662 *segment = seg;
663 /* the locked segment has to be unlocked by the caller */
664 return SUCCESS;
665 }
666 item = item->next;
667 }
668 unlock_single_segment(this, seg);
669 return NOT_FOUND;
670 }
671
672 /**
673 * Find an entry by ike_sa_id_t.
674 * Note: On SUCCESS, the caller has to unlock the segment.
675 */
676 static status_t get_entry_by_id(private_ike_sa_manager_t *this,
677 ike_sa_id_t *ike_sa_id, entry_t **entry, u_int *segment)
678 {
679 return get_entry_by_match_function(this, ike_sa_id, entry, segment,
680 (linked_list_match_t)entry_match_by_id, ike_sa_id);
681 }
682
683 /**
684 * Find an entry by IKE_SA pointer.
685 * Note: On SUCCESS, the caller has to unlock the segment.
686 */
687 static status_t get_entry_by_sa(private_ike_sa_manager_t *this,
688 ike_sa_id_t *ike_sa_id, ike_sa_t *ike_sa, entry_t **entry, u_int *segment)
689 {
690 return get_entry_by_match_function(this, ike_sa_id, entry, segment,
691 (linked_list_match_t)entry_match_by_sa, ike_sa);
692 }
693
694 /**
695 * Wait until no other thread is using an IKE_SA, return FALSE if entry not
696 * acquirable.
697 */
698 static bool wait_for_entry(private_ike_sa_manager_t *this, entry_t *entry,
699 u_int segment)
700 {
701 if (entry->driveout_new_threads)
702 {
703 /* we are not allowed to get this */
704 return FALSE;
705 }
706 while (entry->checked_out && !entry->driveout_waiting_threads)
707 {
708 /* wait until we can get it for ourselves;
709 * we register ourselves as waiting. */
710 entry->waiting_threads++;
711 entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
712 entry->waiting_threads--;
713 }
714 /* a deletion request forbids us to get this SA, move on to the next one */
715 if (entry->driveout_waiting_threads)
716 {
717 /* we must signal here, others may be waiting on it, too */
718 entry->condvar->signal(entry->condvar);
719 return FALSE;
720 }
721 return TRUE;
722 }
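/*
 * Note on usage (a sketch mirroring checkout() further below): callers must
 * already hold the segment's mutex, usually acquired via get_entry_by_id(),
 * because the condvar wait above releases and re-acquires exactly that mutex.
 *
 *   if (get_entry_by_id(this, ike_sa_id, &entry, &segment) == SUCCESS)
 *   {
 *       if (wait_for_entry(this, entry, segment))
 *       {
 *           entry->checked_out = TRUE;
 *           ike_sa = entry->ike_sa;
 *       }
 *       unlock_single_segment(this, segment);
 *   }
 */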
723
724 /**
725 * Put a half-open SA into the hash table.
726 */
727 static void put_half_open(private_ike_sa_manager_t *this, entry_t *entry)
728 {
729 table_item_t *item;
730 u_int row, segment;
731 rwlock_t *lock;
732 half_open_t *half_open;
733 chunk_t addr;
734
735 addr = entry->other->get_address(entry->other);
736 row = chunk_hash(addr) & this->table_mask;
737 segment = row & this->segment_mask;
738 lock = this->half_open_segments[segment].lock;
739 lock->write_lock(lock);
740 item = this->half_open_table[row];
741 while (item)
742 {
743 half_open = item->value;
744
745 if (chunk_equals(addr, half_open->other))
746 {
747 half_open->count++;
748 break;
749 }
750 item = item->next;
751 }
752
753 if (!item)
754 {
755 INIT(half_open,
756 .other = chunk_clone(addr),
757 .count = 1,
758 );
759 INIT(item,
760 .value = half_open,
761 .next = this->half_open_table[row],
762 );
763 this->half_open_table[row] = item;
764 }
765 this->half_open_segments[segment].count++;
766 lock->unlock(lock);
767 }
768
769 /**
770 * Remove a half-open SA from the hash table.
771 */
772 static void remove_half_open(private_ike_sa_manager_t *this, entry_t *entry)
773 {
774 table_item_t *item, *prev = NULL;
775 u_int row, segment;
776 rwlock_t *lock;
777 chunk_t addr;
778
779 addr = entry->other->get_address(entry->other);
780 row = chunk_hash(addr) & this->table_mask;
781 segment = row & this->segment_mask;
782 lock = this->half_open_segments[segment].lock;
783 lock->write_lock(lock);
784 item = this->half_open_table[row];
785 while (item)
786 {
787 half_open_t *half_open = item->value;
788
789 if (chunk_equals(addr, half_open->other))
790 {
791 if (--half_open->count == 0)
792 {
793 if (prev)
794 {
795 prev->next = item->next;
796 }
797 else
798 {
799 this->half_open_table[row] = item->next;
800 }
801 half_open_destroy(half_open);
802 free(item);
803 }
804 this->half_open_segments[segment].count--;
805 break;
806 }
807 prev = item;
808 item = item->next;
809 }
810 lock->unlock(lock);
811 }
812
813 /**
814 * Put an SA between two peers into the hash table.
815 */
816 static void put_connected_peers(private_ike_sa_manager_t *this, entry_t *entry)
817 {
818 table_item_t *item;
819 u_int row, segment;
820 rwlock_t *lock;
821 connected_peers_t *connected_peers;
822 chunk_t my_id, other_id;
823 int family;
824
825 my_id = entry->my_id->get_encoding(entry->my_id);
826 other_id = entry->other_id->get_encoding(entry->other_id);
827 family = entry->other->get_family(entry->other);
828 row = chunk_hash_inc(other_id, chunk_hash(my_id)) & this->table_mask;
829 segment = row & this->segment_mask;
830 lock = this->connected_peers_segments[segment].lock;
831 lock->write_lock(lock);
832 item = this->connected_peers_table[row];
833 while (item)
834 {
835 connected_peers = item->value;
836
837 if (connected_peers_match(connected_peers, entry->my_id,
838 entry->other_id, family))
839 {
840 if (connected_peers->sas->find_first(connected_peers->sas,
841 (linked_list_match_t)entry->ike_sa_id->equals,
842 NULL, entry->ike_sa_id) == SUCCESS)
843 {
844 lock->unlock(lock);
845 return;
846 }
847 break;
848 }
849 item = item->next;
850 }
851
852 if (!item)
853 {
854 INIT(connected_peers,
855 .my_id = entry->my_id->clone(entry->my_id),
856 .other_id = entry->other_id->clone(entry->other_id),
857 .family = family,
858 .sas = linked_list_create(),
859 );
860 INIT(item,
861 .value = connected_peers,
862 .next = this->connected_peers_table[row],
863 );
864 this->connected_peers_table[row] = item;
865 }
866 connected_peers->sas->insert_last(connected_peers->sas,
867 entry->ike_sa_id->clone(entry->ike_sa_id));
868 this->connected_peers_segments[segment].count++;
869 lock->unlock(lock);
870 }
871
872 /**
873 * Remove an SA between two peers from the hash table.
874 */
875 static void remove_connected_peers(private_ike_sa_manager_t *this, entry_t *entry)
876 {
877 table_item_t *item, *prev = NULL;
878 u_int row, segment;
879 rwlock_t *lock;
880 chunk_t my_id, other_id;
881 int family;
882
883 my_id = entry->my_id->get_encoding(entry->my_id);
884 other_id = entry->other_id->get_encoding(entry->other_id);
885 family = entry->other->get_family(entry->other);
886
887 row = chunk_hash_inc(other_id, chunk_hash(my_id)) & this->table_mask;
888 segment = row & this->segment_mask;
889
890 lock = this->connected_peers_segments[segment].lock;
891 lock->write_lock(lock);
892 item = this->connected_peers_table[row];
893 while (item)
894 {
895 connected_peers_t *current = item->value;
896
897 if (connected_peers_match(current, entry->my_id, entry->other_id,
898 family))
899 {
900 enumerator_t *enumerator;
901 ike_sa_id_t *ike_sa_id;
902
903 enumerator = current->sas->create_enumerator(current->sas);
904 while (enumerator->enumerate(enumerator, &ike_sa_id))
905 {
906 if (ike_sa_id->equals(ike_sa_id, entry->ike_sa_id))
907 {
908 current->sas->remove_at(current->sas, enumerator);
909 ike_sa_id->destroy(ike_sa_id);
910 this->connected_peers_segments[segment].count--;
911 break;
912 }
913 }
914 enumerator->destroy(enumerator);
915 if (current->sas->get_count(current->sas) == 0)
916 {
917 if (prev)
918 {
919 prev->next = item->next;
920 }
921 else
922 {
923 this->connected_peers_table[row] = item->next;
924 }
925 connected_peers_destroy(current);
926 free(item);
927 }
928 break;
929 }
930 prev = item;
931 item = item->next;
932 }
933 lock->unlock(lock);
934 }
935
936 /**
937 * Get a random SPI for new IKE_SAs
938 */
939 static u_int64_t get_spi(private_ike_sa_manager_t *this)
940 {
941 u_int64_t spi;
942
943 if (this->rng &&
944 this->rng->get_bytes(this->rng, sizeof(spi), (u_int8_t*)&spi))
945 {
946 return spi;
947 }
948 return 0;
949 }
950
951 /**
952 * Calculate the hash of the initial IKE message. Memory for the hash is
953 * allocated on success.
954 *
955 * @returns TRUE on success
956 */
957 static bool get_init_hash(private_ike_sa_manager_t *this, message_t *message,
958 chunk_t *hash)
959 {
960 host_t *src;
961
962 if (!this->hasher)
963 { /* this might be the case when flush() has been called */
964 return FALSE;
965 }
966 if (message->get_first_payload_type(message) == FRAGMENT_V1)
967 { /* only hash the source IP, port and SPI for fragmented init messages */
968 u_int16_t port;
969 u_int64_t spi;
970
971 src = message->get_source(message);
972 if (!this->hasher->allocate_hash(this->hasher,
973 src->get_address(src), NULL))
974 {
975 return FALSE;
976 }
977 port = src->get_port(src);
978 if (!this->hasher->allocate_hash(this->hasher,
979 chunk_from_thing(port), NULL))
980 {
981 return FALSE;
982 }
983 spi = message->get_initiator_spi(message);
984 return this->hasher->allocate_hash(this->hasher,
985 chunk_from_thing(spi), hash);
986 }
987 if (message->get_exchange_type(message) == ID_PROT)
988 { /* include the source for Main Mode as the hash will be the same if
989 * SPIs are reused by two initiators that use the same proposal */
990 src = message->get_source(message);
991
992 if (!this->hasher->allocate_hash(this->hasher,
993 src->get_address(src), NULL))
994 {
995 return FALSE;
996 }
997 }
998 return this->hasher->allocate_hash(this->hasher,
999 message->get_packet_data(message), hash);
1000 }
1001
1002 /**
1003 * Check if we already have created an IKE_SA based on the initial IKE message
1004 * with the given hash.
1005 * If not, the hash is stored; the hash data is not(!) cloned.
1006 *
1007 * Also, the local SPI is returned. In case of a retransmit this is already
1008 * stored together with the hash, otherwise it is newly allocated and should
1009 * be used to create the IKE_SA.
1010 *
1011 * @returns ALREADY_DONE if the message with the given hash has been seen before
1012 * NOT_FOUND if the message hash was not found
1013 * FAILED if the SPI allocation failed
1014 */
1015 static status_t check_and_put_init_hash(private_ike_sa_manager_t *this,
1016 chunk_t init_hash, u_int64_t *our_spi)
1017 {
1018 table_item_t *item;
1019 u_int row, segment;
1020 mutex_t *mutex;
1021 init_hash_t *init;
1022 u_int64_t spi;
1023
1024 row = chunk_hash(init_hash) & this->table_mask;
1025 segment = row & this->segment_mask;
1026 mutex = this->init_hashes_segments[segment].mutex;
1027 mutex->lock(mutex);
1028 item = this->init_hashes_table[row];
1029 while (item)
1030 {
1031 init_hash_t *current = item->value;
1032
1033 if (chunk_equals(init_hash, current->hash))
1034 {
1035 *our_spi = current->our_spi;
1036 mutex->unlock(mutex);
1037 return ALREADY_DONE;
1038 }
1039 item = item->next;
1040 }
1041
1042 spi = get_spi(this);
1043 if (!spi)
1044 {
mutex->unlock(mutex); /* don't leave the segment mutex locked on failure */
1045 return FAILED;
1046 }
1047
1048 INIT(init,
1049 .hash = {
1050 .len = init_hash.len,
1051 .ptr = init_hash.ptr,
1052 },
1053 .our_spi = spi,
1054 );
1055 INIT(item,
1056 .value = init,
1057 .next = this->init_hashes_table[row],
1058 );
1059 this->init_hashes_table[row] = item;
1060 *our_spi = init->our_spi;
1061 mutex->unlock(mutex);
1062 return NOT_FOUND;
1063 }
1064
1065 /**
1066 * Remove the hash of an initial IKE message from the cache.
1067 */
1068 static void remove_init_hash(private_ike_sa_manager_t *this, chunk_t init_hash)
1069 {
1070 table_item_t *item, *prev = NULL;
1071 u_int row, segment;
1072 mutex_t *mutex;
1073
1074 row = chunk_hash(init_hash) & this->table_mask;
1075 segment = row & this->segment_mask;
1076 mutex = this->init_hashes_segments[segment].mutex;
1077 mutex->lock(mutex);
1078 item = this->init_hashes_table[row];
1079 while (item)
1080 {
1081 init_hash_t *current = item->value;
1082
1083 if (chunk_equals(init_hash, current->hash))
1084 {
1085 if (prev)
1086 {
1087 prev->next = item->next;
1088 }
1089 else
1090 {
1091 this->init_hashes_table[row] = item->next;
1092 }
1093 free(current);
1094 free(item);
1095 break;
1096 }
1097 prev = item;
1098 item = item->next;
1099 }
1100 mutex->unlock(mutex);
1101 }
1102
1103 METHOD(ike_sa_manager_t, checkout, ike_sa_t*,
1104 private_ike_sa_manager_t *this, ike_sa_id_t *ike_sa_id)
1105 {
1106 ike_sa_t *ike_sa = NULL;
1107 entry_t *entry;
1108 u_int segment;
1109
1110 DBG2(DBG_MGR, "checkout IKE_SA");
1111
1112 if (get_entry_by_id(this, ike_sa_id, &entry, &segment) == SUCCESS)
1113 {
1114 if (wait_for_entry(this, entry, segment))
1115 {
1116 entry->checked_out = TRUE;
1117 ike_sa = entry->ike_sa;
1118 DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
1119 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
1120 }
1121 unlock_single_segment(this, segment);
1122 }
1123 charon->bus->set_sa(charon->bus, ike_sa);
1124 return ike_sa;
1125 }
1126
1127 METHOD(ike_sa_manager_t, checkout_new, ike_sa_t*,
1128 private_ike_sa_manager_t* this, ike_version_t version, bool initiator)
1129 {
1130 ike_sa_id_t *ike_sa_id;
1131 ike_sa_t *ike_sa;
1132 u_int8_t ike_version;
1133 u_int64_t spi;
1134
1135 ike_version = version == IKEV1 ? IKEV1_MAJOR_VERSION : IKEV2_MAJOR_VERSION;
1136
1137 spi = get_spi(this);
1138 if (!spi)
1139 {
1140 DBG1(DBG_MGR, "failed to allocate SPI for new IKE_SA");
1141 return NULL;
1142 }
1143
1144 if (initiator)
1145 {
1146 ike_sa_id = ike_sa_id_create(ike_version, spi, 0, TRUE);
1147 }
1148 else
1149 {
1150 ike_sa_id = ike_sa_id_create(ike_version, 0, spi, FALSE);
1151 }
1152 ike_sa = ike_sa_create(ike_sa_id, initiator, version);
1153 ike_sa_id->destroy(ike_sa_id);
1154
1155 if (ike_sa)
1156 {
1157 DBG2(DBG_MGR, "created IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
1158 ike_sa->get_unique_id(ike_sa));
1159 }
1160 return ike_sa;
1161 }
1162
1163 /**
1164 * Get the message ID or message hash to detect early retransmissions
1165 */
1166 static u_int32_t get_message_id_or_hash(message_t *message)
1167 {
1168 /* Use the message ID, or the message hash in IKEv1 Main/Aggressive mode */
1169 if (message->get_major_version(message) == IKEV1_MAJOR_VERSION &&
1170 message->get_message_id(message) == 0)
1171 {
1172 return chunk_hash(message->get_packet_data(message));
1173 }
1174 return message->get_message_id(message);
1175 }
1176
1177 METHOD(ike_sa_manager_t, checkout_by_message, ike_sa_t*,
1178 private_ike_sa_manager_t* this, message_t *message)
1179 {
1180 u_int segment;
1181 entry_t *entry;
1182 ike_sa_t *ike_sa = NULL;
1183 ike_sa_id_t *id;
1184 ike_version_t ike_version;
1185 bool is_init = FALSE;
1186
1187 id = message->get_ike_sa_id(message);
1188 /* clone the IKE_SA ID so we can modify the initiator flag */
1189 id = id->clone(id);
1190 id->switch_initiator(id);
1191
1192 DBG2(DBG_MGR, "checkout IKE_SA by message");
1193
1194 if (id->get_responder_spi(id) == 0)
1195 {
1196 if (message->get_major_version(message) == IKEV2_MAJOR_VERSION)
1197 {
1198 if (message->get_exchange_type(message) == IKE_SA_INIT &&
1199 message->get_request(message))
1200 {
1201 ike_version = IKEV2;
1202 is_init = TRUE;
1203 }
1204 }
1205 else
1206 {
1207 if (message->get_exchange_type(message) == ID_PROT ||
1208 message->get_exchange_type(message) == AGGRESSIVE)
1209 {
1210 ike_version = IKEV1;
1211 is_init = TRUE;
1212 if (id->is_initiator(id))
1213 { /* not set in IKEv1, switch back before applying to new SA */
1214 id->switch_initiator(id);
1215 }
1216 }
1217 }
1218 }
1219
1220 if (is_init)
1221 {
1222 u_int64_t our_spi;
1223 chunk_t hash;
1224
1225 if (!get_init_hash(this, message, &hash))
1226 {
1227 DBG1(DBG_MGR, "ignoring message, failed to hash message");
1228 id->destroy(id);
1229 return NULL;
1230 }
1231
1232 /* ensure this is not a retransmit of an already handled init message */
1233 switch (check_and_put_init_hash(this, hash, &our_spi))
1234 {
1235 case NOT_FOUND:
1236 { /* we've not seen this packet yet, create a new IKE_SA */
1237 if (!this->ikesa_limit ||
1238 this->public.get_count(&this->public) < this->ikesa_limit)
1239 {
1240 id->set_responder_spi(id, our_spi);
1241 ike_sa = ike_sa_create(id, FALSE, ike_version);
1242 if (ike_sa)
1243 {
1244 entry = entry_create();
1245 entry->ike_sa = ike_sa;
1246 entry->ike_sa_id = id;
1247
1248 segment = put_entry(this, entry);
1249 entry->checked_out = TRUE;
1250 unlock_single_segment(this, segment);
1251
1252 entry->processing = get_message_id_or_hash(message);
1253 entry->init_hash = hash;
1254
1255 DBG2(DBG_MGR, "created IKE_SA %s[%u]",
1256 ike_sa->get_name(ike_sa),
1257 ike_sa->get_unique_id(ike_sa));
1258
1259 charon->bus->set_sa(charon->bus, ike_sa);
1260 return ike_sa;
1261 }
1262 else
1263 {
1264 DBG1(DBG_MGR, "creating IKE_SA failed, ignoring message");
1265 }
1266 }
1267 else
1268 {
1269 DBG1(DBG_MGR, "ignoring %N, hitting IKE_SA limit (%u)",
1270 exchange_type_names, message->get_exchange_type(message),
1271 this->ikesa_limit);
1272 }
1273 remove_init_hash(this, hash);
1274 chunk_free(&hash);
1275 id->destroy(id);
1276 return NULL;
1277 }
1278 case FAILED:
1279 { /* we failed to allocate an SPI */
1280 chunk_free(&hash);
1281 id->destroy(id);
1282 DBG1(DBG_MGR, "ignoring message, failed to allocate SPI");
1283 return NULL;
1284 }
1285 case ALREADY_DONE:
1286 default:
1287 break;
1288 }
1289 /* it looks like we already handled this init message to some degree */
1290 id->set_responder_spi(id, our_spi);
1291 chunk_free(&hash);
1292 }
1293
1294 if (get_entry_by_id(this, id, &entry, &segment) == SUCCESS)
1295 {
1296 /* only check out if we are not already processing it. */
1297 if (entry->processing == get_message_id_or_hash(message))
1298 {
1299 DBG1(DBG_MGR, "ignoring request with ID %u, already processing",
1300 entry->processing);
1301 }
1302 else if (wait_for_entry(this, entry, segment))
1303 {
1304 ike_sa_id_t *ike_id;
1305
1306 ike_id = entry->ike_sa->get_id(entry->ike_sa);
1307 entry->checked_out = TRUE;
1308 if (message->get_first_payload_type(message) != FRAGMENT_V1)
1309 {
1310 entry->processing = get_message_id_or_hash(message);
1311 }
1312 if (ike_id->get_responder_spi(ike_id) == 0)
1313 {
1314 ike_id->set_responder_spi(ike_id, id->get_responder_spi(id));
1315 }
1316 ike_sa = entry->ike_sa;
1317 DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
1318 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
1319 }
1320 unlock_single_segment(this, segment);
1321 }
1322 else
1323 {
1324 charon->bus->alert(charon->bus, ALERT_INVALID_IKE_SPI, message);
1325 }
1326 id->destroy(id);
1327 charon->bus->set_sa(charon->bus, ike_sa);
1328 return ike_sa;
1329 }
1330
1331 METHOD(ike_sa_manager_t, checkout_by_config, ike_sa_t*,
1332 private_ike_sa_manager_t *this, peer_cfg_t *peer_cfg)
1333 {
1334 enumerator_t *enumerator;
1335 entry_t *entry;
1336 ike_sa_t *ike_sa = NULL;
1337 peer_cfg_t *current_peer;
1338 ike_cfg_t *current_ike;
1339 u_int segment;
1340
1341 DBG2(DBG_MGR, "checkout IKE_SA by config");
1342
1343 if (!this->reuse_ikesa)
1344 { /* IKE_SA reuse disabled by config */
1345 ike_sa = checkout_new(this, peer_cfg->get_ike_version(peer_cfg), TRUE);
1346 charon->bus->set_sa(charon->bus, ike_sa);
1347 return ike_sa;
1348 }
1349
1350 enumerator = create_table_enumerator(this);
1351 while (enumerator->enumerate(enumerator, &entry, &segment))
1352 {
1353 if (!wait_for_entry(this, entry, segment))
1354 {
1355 continue;
1356 }
1357 if (entry->ike_sa->get_state(entry->ike_sa) == IKE_DELETING)
1358 { /* skip IKE_SAs which are not usable */
1359 continue;
1360 }
1361
1362 current_peer = entry->ike_sa->get_peer_cfg(entry->ike_sa);
1363 if (current_peer && current_peer->equals(current_peer, peer_cfg))
1364 {
1365 current_ike = current_peer->get_ike_cfg(current_peer);
1366 if (current_ike->equals(current_ike, peer_cfg->get_ike_cfg(peer_cfg)))
1367 {
1368 entry->checked_out = TRUE;
1369 ike_sa = entry->ike_sa;
1370 DBG2(DBG_MGR, "found existing IKE_SA %u with a '%s' config",
1371 ike_sa->get_unique_id(ike_sa),
1372 current_peer->get_name(current_peer));
1373 break;
1374 }
1375 }
1376 }
1377 enumerator->destroy(enumerator);
1378
1379 if (!ike_sa)
1380 { /* no IKE_SA using such a config, hand out a new */
1381 ike_sa = checkout_new(this, peer_cfg->get_ike_version(peer_cfg), TRUE);
1382 }
1383 charon->bus->set_sa(charon->bus, ike_sa);
1384 return ike_sa;
1385 }
1386
1387 METHOD(ike_sa_manager_t, checkout_by_id, ike_sa_t*,
1388 private_ike_sa_manager_t *this, u_int32_t id, bool child)
1389 {
1390 enumerator_t *enumerator, *children;
1391 entry_t *entry;
1392 ike_sa_t *ike_sa = NULL;
1393 child_sa_t *child_sa;
1394 u_int segment;
1395
1396 DBG2(DBG_MGR, "checkout IKE_SA by ID");
1397
1398 enumerator = create_table_enumerator(this);
1399 while (enumerator->enumerate(enumerator, &entry, &segment))
1400 {
1401 if (wait_for_entry(this, entry, segment))
1402 {
1403 /* look for a child with such a reqid ... */
1404 if (child)
1405 {
1406 children = entry->ike_sa->create_child_sa_enumerator(entry->ike_sa);
1407 while (children->enumerate(children, (void**)&child_sa))
1408 {
1409 if (child_sa->get_reqid(child_sa) == id)
1410 {
1411 ike_sa = entry->ike_sa;
1412 break;
1413 }
1414 }
1415 children->destroy(children);
1416 }
1417 else /* ... or for an IKE_SA with such a unique id */
1418 {
1419 if (entry->ike_sa->get_unique_id(entry->ike_sa) == id)
1420 {
1421 ike_sa = entry->ike_sa;
1422 }
1423 }
1424 /* got one, return */
1425 if (ike_sa)
1426 {
1427 entry->checked_out = TRUE;
1428 DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
1429 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
1430 break;
1431 }
1432 }
1433 }
1434 enumerator->destroy(enumerator);
1435
1436 charon->bus->set_sa(charon->bus, ike_sa);
1437 return ike_sa;
1438 }
1439
1440 METHOD(ike_sa_manager_t, checkout_by_name, ike_sa_t*,
1441 private_ike_sa_manager_t *this, char *name, bool child)
1442 {
1443 enumerator_t *enumerator, *children;
1444 entry_t *entry;
1445 ike_sa_t *ike_sa = NULL;
1446 child_sa_t *child_sa;
1447 u_int segment;
1448
1449 enumerator = create_table_enumerator(this);
1450 while (enumerator->enumerate(enumerator, &entry, &segment))
1451 {
1452 if (wait_for_entry(this, entry, segment))
1453 {
1454 /* look for a child with such a policy name ... */
1455 if (child)
1456 {
1457 children = entry->ike_sa->create_child_sa_enumerator(entry->ike_sa);
1458 while (children->enumerate(children, (void**)&child_sa))
1459 {
1460 if (streq(child_sa->get_name(child_sa), name))
1461 {
1462 ike_sa = entry->ike_sa;
1463 break;
1464 }
1465 }
1466 children->destroy(children);
1467 }
1468 else /* ... or for an IKE_SA with such a connection name */
1469 {
1470 if (streq(entry->ike_sa->get_name(entry->ike_sa), name))
1471 {
1472 ike_sa = entry->ike_sa;
1473 }
1474 }
1475 /* got one, return */
1476 if (ike_sa)
1477 {
1478 entry->checked_out = TRUE;
1479 DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
1480 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
1481 break;
1482 }
1483 }
1484 }
1485 enumerator->destroy(enumerator);
1486
1487 charon->bus->set_sa(charon->bus, ike_sa);
1488 return ike_sa;
1489 }
1490
1491 /**
1492 * enumerator filter function, waiting variant
1493 */
1494 static bool enumerator_filter_wait(private_ike_sa_manager_t *this,
1495 entry_t **in, ike_sa_t **out, u_int *segment)
1496 {
1497 if (wait_for_entry(this, *in, *segment))
1498 {
1499 *out = (*in)->ike_sa;
1500 charon->bus->set_sa(charon->bus, *out);
1501 return TRUE;
1502 }
1503 return FALSE;
1504 }
1505
1506 /**
1507 * enumerator filter function, skipping variant
1508 */
1509 static bool enumerator_filter_skip(private_ike_sa_manager_t *this,
1510 entry_t **in, ike_sa_t **out, u_int *segment)
1511 {
1512 if (!(*in)->driveout_new_threads &&
1513 !(*in)->driveout_waiting_threads &&
1514 !(*in)->checked_out)
1515 {
1516 *out = (*in)->ike_sa;
1517 charon->bus->set_sa(charon->bus, *out);
1518 return TRUE;
1519 }
1520 return FALSE;
1521 }
1522
1523 /**
1524 * Reset threads SA after enumeration
1525 */
1526 static void reset_sa(void *data)
1527 {
1528 charon->bus->set_sa(charon->bus, NULL);
1529 }
1530
1531 METHOD(ike_sa_manager_t, create_enumerator, enumerator_t*,
1532 private_ike_sa_manager_t* this, bool wait)
1533 {
1534 return enumerator_create_filter(create_table_enumerator(this),
1535 wait ? (void*)enumerator_filter_wait : (void*)enumerator_filter_skip,
1536 this, reset_sa);
1537 }
1538
1539 METHOD(ike_sa_manager_t, checkin, void,
1540 private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
1541 {
1542 /* to check the SA back in, we look for the pointer of the ike_sa
1543 * in all entries.
1544 * The lookup is done by initiator SPI, so even if the SPI has changed (e.g.
1545 * on reception of a IKE_SA_INIT response) the lookup will work but
1546 * updating of the SPI MAY be necessary...
1547 */
1548 entry_t *entry;
1549 ike_sa_id_t *ike_sa_id;
1550 host_t *other;
1551 identification_t *my_id, *other_id;
1552 u_int segment;
1553
1554 ike_sa_id = ike_sa->get_id(ike_sa);
1555 my_id = ike_sa->get_my_id(ike_sa);
1556 other_id = ike_sa->get_other_eap_id(ike_sa);
1557 other = ike_sa->get_other_host(ike_sa);
1558
1559 DBG2(DBG_MGR, "checkin IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
1560 ike_sa->get_unique_id(ike_sa));
1561
1562 /* look for the entry */
1563 if (get_entry_by_sa(this, ike_sa_id, ike_sa, &entry, &segment) == SUCCESS)
1564 {
1565 /* ike_sa_id must be updated */
1566 entry->ike_sa_id->replace_values(entry->ike_sa_id, ike_sa->get_id(ike_sa));
1567 /* signal waiting threads */
1568 entry->checked_out = FALSE;
1569 entry->processing = -1;
1570 /* check if this SA is half-open */
1571 if (entry->half_open && ike_sa->get_state(ike_sa) != IKE_CONNECTING)
1572 {
1573 /* not half open anymore */
1574 entry->half_open = FALSE;
1575 remove_half_open(this, entry);
1576 }
1577 else if (entry->half_open && !other->ip_equals(other, entry->other))
1578 {
1579 /* the other host's IP has changed, we must update the hash table */
1580 remove_half_open(this, entry);
1581 DESTROY_IF(entry->other);
1582 entry->other = other->clone(other);
1583 put_half_open(this, entry);
1584 }
1585 else if (!entry->half_open &&
1586 !entry->ike_sa_id->is_initiator(entry->ike_sa_id) &&
1587 ike_sa->get_state(ike_sa) == IKE_CONNECTING)
1588 {
1589 /* this is a new half-open SA */
1590 entry->half_open = TRUE;
1591 entry->other = other->clone(other);
1592 put_half_open(this, entry);
1593 }
1594 DBG2(DBG_MGR, "check-in of IKE_SA successful.");
1595 entry->condvar->signal(entry->condvar);
1596 }
1597 else
1598 {
1599 entry = entry_create();
1600 entry->ike_sa_id = ike_sa_id->clone(ike_sa_id);
1601 entry->ike_sa = ike_sa;
1602 segment = put_entry(this, entry);
1603 }
1604
1605 /* apply identities for duplicate test */
1606 if ((ike_sa->get_state(ike_sa) == IKE_ESTABLISHED ||
1607 ike_sa->get_state(ike_sa) == IKE_PASSIVE) &&
1608 entry->my_id == NULL && entry->other_id == NULL)
1609 {
1610 if (ike_sa->get_version(ike_sa) == IKEV1)
1611 {
1612 /* If authenticated and received INITIAL_CONTACT,
1613 * delete any existing IKE_SAs with that peer. */
1614 if (ike_sa->has_condition(ike_sa, COND_INIT_CONTACT_SEEN))
1615 {
1616 this->public.check_uniqueness(&this->public, ike_sa, TRUE);
1617 ike_sa->set_condition(ike_sa, COND_INIT_CONTACT_SEEN, FALSE);
1618 }
1619 }
1620
1621 entry->my_id = my_id->clone(my_id);
1622 entry->other_id = other_id->clone(other_id);
1623 if (!entry->other)
1624 {
1625 entry->other = other->clone(other);
1626 }
1627 put_connected_peers(this, entry);
1628 }
1629
1630 unlock_single_segment(this, segment);
1631
1632 charon->bus->set_sa(charon->bus, NULL);
1633 }
1634
1635 METHOD(ike_sa_manager_t, checkin_and_destroy, void,
1636 private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
1637 {
1638 /* deletion is a bit complex: we must ensure that no thread is waiting for
1639 * this SA.
1640 * We mark the entry so no new threads can access it, keep signaling until
1641 * no threads wait in the condvar anymore, then remove and destroy it.
1642 */
1643 entry_t *entry;
1644 ike_sa_id_t *ike_sa_id;
1645 u_int segment;
1646
1647 ike_sa_id = ike_sa->get_id(ike_sa);
1648
1649 DBG2(DBG_MGR, "checkin and destroy IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
1650 ike_sa->get_unique_id(ike_sa));
1651
1652 if (get_entry_by_sa(this, ike_sa_id, ike_sa, &entry, &segment) == SUCCESS)
1653 {
1654 if (entry->driveout_waiting_threads && entry->driveout_new_threads)
1655 { /* it looks like flush() has been called and the SA is being deleted
1656 * anyway, just check it in */
1657 DBG2(DBG_MGR, "ignored check-in and destroy of IKE_SA during shutdown");
1658 entry->checked_out = FALSE;
1659 entry->condvar->broadcast(entry->condvar);
1660 unlock_single_segment(this, segment);
1661 return;
1662 }
1663
1664 /* drive out waiting threads, as we are in a hurry */
1665 entry->driveout_waiting_threads = TRUE;
1666 /* mark it, so no new threads can get this entry */
1667 entry->driveout_new_threads = TRUE;
1668 /* wait until all workers have done their work */
1669 while (entry->waiting_threads)
1670 {
1671 /* wake up all */
1672 entry->condvar->broadcast(entry->condvar);
1673 /* they will wake us again when their work is done */
1674 entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
1675 }
1676 remove_entry(this, entry);
1677 unlock_single_segment(this, segment);
1678
1679 if (entry->half_open)
1680 {
1681 remove_half_open(this, entry);
1682 }
1683 if (entry->my_id && entry->other_id)
1684 {
1685 remove_connected_peers(this, entry);
1686 }
1687 if (entry->init_hash.ptr)
1688 {
1689 remove_init_hash(this, entry->init_hash);
1690 }
1691
1692 entry_destroy(entry);
1693
1694 DBG2(DBG_MGR, "check-in and destroy of IKE_SA successful");
1695 }
1696 else
1697 {
1698 DBG1(DBG_MGR, "tried to check-in and delete nonexisting IKE_SA");
1699 ike_sa->destroy(ike_sa);
1700 }
1701 charon->bus->set_sa(charon->bus, NULL);
1702 }
1703
1704 /**
1705 * Cleanup function for create_id_enumerator
1706 */
1707 static void id_enumerator_cleanup(linked_list_t *ids)
1708 {
1709 ids->destroy_offset(ids, offsetof(ike_sa_id_t, destroy));
1710 }
1711
1712 METHOD(ike_sa_manager_t, create_id_enumerator, enumerator_t*,
1713 private_ike_sa_manager_t *this, identification_t *me,
1714 identification_t *other, int family)
1715 {
1716 table_item_t *item;
1717 u_int row, segment;
1718 rwlock_t *lock;
1719 linked_list_t *ids = NULL;
1720
1721 row = chunk_hash_inc(other->get_encoding(other),
1722 chunk_hash(me->get_encoding(me))) & this->table_mask;
1723 segment = row & this->segment_mask;
1724
1725 lock = this->connected_peers_segments[segment].lock;
1726 lock->read_lock(lock);
1727 item = this->connected_peers_table[row];
1728 while (item)
1729 {
1730 connected_peers_t *current = item->value;
1731
1732 if (connected_peers_match(current, me, other, family))
1733 {
1734 ids = current->sas->clone_offset(current->sas,
1735 offsetof(ike_sa_id_t, clone));
1736 break;
1737 }
1738 item = item->next;
1739 }
1740 lock->unlock(lock);
1741
1742 if (!ids)
1743 {
1744 return enumerator_create_empty();
1745 }
1746 return enumerator_create_cleaner(ids->create_enumerator(ids),
1747 (void*)id_enumerator_cleanup, ids);
1748 }
1749
1750 /**
1751 * Move all CHILD_SAs from old to new
1752 */
1753 static void adopt_children(ike_sa_t *old, ike_sa_t *new)
1754 {
1755 enumerator_t *enumerator;
1756 child_sa_t *child_sa;
1757
1758 enumerator = old->create_child_sa_enumerator(old);
1759 while (enumerator->enumerate(enumerator, &child_sa))
1760 {
1761 old->remove_child_sa(old, enumerator);
1762 new->add_child_sa(new, child_sa);
1763 }
1764 enumerator->destroy(enumerator);
1765 }
1766
1767 METHOD(ike_sa_manager_t, check_uniqueness, bool,
1768 private_ike_sa_manager_t *this, ike_sa_t *ike_sa, bool force_replace)
1769 {
1770 bool cancel = FALSE;
1771 peer_cfg_t *peer_cfg;
1772 unique_policy_t policy;
1773 enumerator_t *enumerator;
1774 ike_sa_id_t *id = NULL;
1775 identification_t *me, *other;
1776 host_t *other_host;
1777
1778 peer_cfg = ike_sa->get_peer_cfg(ike_sa);
1779 policy = peer_cfg->get_unique_policy(peer_cfg);
1780 if (policy == UNIQUE_NEVER || (policy == UNIQUE_NO && !force_replace))
1781 {
1782 return FALSE;
1783 }
1784 me = ike_sa->get_my_id(ike_sa);
1785 other = ike_sa->get_other_eap_id(ike_sa);
1786 other_host = ike_sa->get_other_host(ike_sa);
1787
1788 enumerator = create_id_enumerator(this, me, other,
1789 other_host->get_family(other_host));
1790 while (enumerator->enumerate(enumerator, &id))
1791 {
1792 status_t status = SUCCESS;
1793 ike_sa_t *duplicate;
1794
1795 duplicate = checkout(this, id);
1796 if (!duplicate)
1797 {
1798 continue;
1799 }
1800 if (force_replace)
1801 {
1802 DBG1(DBG_IKE, "destroying duplicate IKE_SA for peer '%Y', "
1803 "received INITIAL_CONTACT", other);
1804 charon->bus->ike_updown(charon->bus, duplicate, FALSE);
1805 checkin_and_destroy(this, duplicate);
1806 continue;
1807 }
1808 peer_cfg = duplicate->get_peer_cfg(duplicate);
1809 if (peer_cfg && peer_cfg->equals(peer_cfg, ike_sa->get_peer_cfg(ike_sa)))
1810 {
1811 switch (duplicate->get_state(duplicate))
1812 {
1813 case IKE_ESTABLISHED:
1814 case IKE_REKEYING:
1815 switch (policy)
1816 {
1817 case UNIQUE_REPLACE:
1818 charon->bus->alert(charon->bus, ALERT_UNIQUE_REPLACE);
1819 if (duplicate->get_version(duplicate) == IKEV1)
1820 {
1821 adopt_children(duplicate, ike_sa);
1822 }
1823 DBG1(DBG_IKE, "deleting duplicate IKE_SA for peer "
1824 "'%Y' due to uniqueness policy", other);
1825 status = duplicate->delete(duplicate);
1826 break;
1827 case UNIQUE_KEEP:
1828 cancel = TRUE;
1829 /* we keep the first IKE_SA and delete all
1830 * other duplicates that might exist */
1831 policy = UNIQUE_REPLACE;
1832 break;
1833 default:
1834 break;
1835 }
1836 break;
1837 default:
1838 break;
1839 }
1840 }
1841 if (status == DESTROY_ME)
1842 {
1843 checkin_and_destroy(this, duplicate);
1844 }
1845 else
1846 {
1847 checkin(this, duplicate);
1848 }
1849 }
1850 enumerator->destroy(enumerator);
1851 /* reset thread's current IKE_SA after checkin */
1852 charon->bus->set_sa(charon->bus, ike_sa);
1853 return cancel;
1854 }
1855
1856 METHOD(ike_sa_manager_t, has_contact, bool,
1857 private_ike_sa_manager_t *this, identification_t *me,
1858 identification_t *other, int family)
1859 {
1860 table_item_t *item;
1861 u_int row, segment;
1862 rwlock_t *lock;
1863 bool found = FALSE;
1864
1865 row = chunk_hash_inc(other->get_encoding(other),
1866 chunk_hash(me->get_encoding(me))) & this->table_mask;
1867 segment = row & this->segment_mask;
1868 lock = this->connected_peers_segments[segment].lock;
1869 lock->read_lock(lock);
1870 item = this->connected_peers_table[row];
1871 while (item)
1872 {
1873 if (connected_peers_match(item->value, me, other, family))
1874 {
1875 found = TRUE;
1876 break;
1877 }
1878 item = item->next;
1879 }
1880 lock->unlock(lock);
1881
1882 return found;
1883 }
1884
1885 METHOD(ike_sa_manager_t, get_count, u_int,
1886 private_ike_sa_manager_t *this)
1887 {
1888 u_int segment, count = 0;
1889 mutex_t *mutex;
1890
1891 for (segment = 0; segment < this->segment_count; segment++)
1892 {
1893 mutex = this->segments[segment & this->segment_mask].mutex;
1894 mutex->lock(mutex);
1895 count += this->segments[segment].count;
1896 mutex->unlock(mutex);
1897 }
1898 return count;
1899 }
1900
1901 METHOD(ike_sa_manager_t, get_half_open_count, u_int,
1902 private_ike_sa_manager_t *this, host_t *ip)
1903 {
1904 table_item_t *item;
1905 u_int row, segment;
1906 rwlock_t *lock;
1907 chunk_t addr;
1908 u_int count = 0;
1909
1910 if (ip)
1911 {
1912 addr = ip->get_address(ip);
1913 row = chunk_hash(addr) & this->table_mask;
1914 segment = row & this->segment_mask;
1915 lock = this->half_open_segments[segment].lock;
1916 lock->read_lock(lock);
1917 item = this->half_open_table[row];
1918 while (item)
1919 {
1920 half_open_t *half_open = item->value;
1921
1922 if (chunk_equals(addr, half_open->other))
1923 {
1924 count = half_open->count;
1925 break;
1926 }
1927 item = item->next;
1928 }
1929 lock->unlock(lock);
1930 }
1931 else
1932 {
1933 for (segment = 0; segment < this->segment_count; segment++)
1934 {
1935 lock = this->half_open_segments[segment].lock;
1936 lock->read_lock(lock);
1937 count += this->half_open_segments[segment].count;
1938 lock->unlock(lock);
1939 }
1940 }
1941 return count;
1942 }
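/*
 * Hypothetical usage sketch (threshold and caller are not part of this file):
 * the per-address count kept above is what DoS protection logic can compare
 * against a limit before spending resources on further half-open IKE_SAs
 * from the same address.
 *
 *   if (manager->get_half_open_count(manager, initiator_host) > SOME_LIMIT)
 *   {
 *       // e.g. drop the request or demand a cookie first
 *   }
 */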
1943
1944 METHOD(ike_sa_manager_t, flush, void,
1945 private_ike_sa_manager_t *this)
1946 {
1947 /* destroy all list entries */
1948 enumerator_t *enumerator;
1949 entry_t *entry;
1950 u_int segment;
1951
1952 lock_all_segments(this);
1953 DBG2(DBG_MGR, "going to destroy IKE_SA manager and all managed IKE_SA's");
1954 /* Step 1: drive out all waiting threads */
1955 DBG2(DBG_MGR, "set driveout flags for all stored IKE_SA's");
1956 enumerator = create_table_enumerator(this);
1957 while (enumerator->enumerate(enumerator, &entry, &segment))
1958 {
1959 /* do not accept new threads, drive out waiting threads */
1960 entry->driveout_new_threads = TRUE;
1961 entry->driveout_waiting_threads = TRUE;
1962 }
1963 enumerator->destroy(enumerator);
1964 DBG2(DBG_MGR, "wait for all threads to leave IKE_SA's");
1965 /* Step 2: wait until all are gone */
1966 enumerator = create_table_enumerator(this);
1967 while (enumerator->enumerate(enumerator, &entry, &segment))
1968 {
1969 while (entry->waiting_threads || entry->checked_out)
1970 {
1971 /* wake up all */
1972 entry->condvar->broadcast(entry->condvar);
1973 /* go sleeping until they are gone */
1974 entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
1975 }
1976 }
1977 enumerator->destroy(enumerator);
1978 DBG2(DBG_MGR, "delete all IKE_SA's");
1979 /* Step 3: initiate deletion of all IKE_SAs */
1980 enumerator = create_table_enumerator(this);
1981 while (enumerator->enumerate(enumerator, &entry, &segment))
1982 {
1983 charon->bus->set_sa(charon->bus, entry->ike_sa);
1984 if (entry->ike_sa->get_version(entry->ike_sa) == IKEV2)
1985 { /* as the delete never gets processed, fire down events */
1986 switch (entry->ike_sa->get_state(entry->ike_sa))
1987 {
1988 case IKE_ESTABLISHED:
1989 case IKE_REKEYING:
1990 case IKE_DELETING:
1991 charon->bus->ike_updown(charon->bus, entry->ike_sa, FALSE);
1992 break;
1993 default:
1994 break;
1995 }
1996 }
1997 entry->ike_sa->delete(entry->ike_sa);
1998 }
1999 enumerator->destroy(enumerator);
2000
2001 DBG2(DBG_MGR, "destroy all entries");
2002 /* Step 4: destroy all entries */
2003 enumerator = create_table_enumerator(this);
2004 while (enumerator->enumerate(enumerator, &entry, &segment))
2005 {
2006 charon->bus->set_sa(charon->bus, entry->ike_sa);
2007 if (entry->half_open)
2008 {
2009 remove_half_open(this, entry);
2010 }
2011 if (entry->my_id && entry->other_id)
2012 {
2013 remove_connected_peers(this, entry);
2014 }
2015 if (entry->init_hash.ptr)
2016 {
2017 remove_init_hash(this, entry->init_hash);
2018 }
2019 remove_entry_at((private_enumerator_t*)enumerator);
2020 entry_destroy(entry);
2021 }
2022 enumerator->destroy(enumerator);
2023 charon->bus->set_sa(charon->bus, NULL);
2024 unlock_all_segments(this);
2025
2026 this->rng->destroy(this->rng);
2027 this->rng = NULL;
2028 this->hasher->destroy(this->hasher);
2029 this->hasher = NULL;
2030 }
2031
2032 METHOD(ike_sa_manager_t, destroy, void,
2033 private_ike_sa_manager_t *this)
2034 {
2035 u_int i;
2036
2037 /* these are already cleared in flush() above */
2038 free(this->ike_sa_table);
2039 free(this->half_open_table);
2040 free(this->connected_peers_table);
2041 free(this->init_hashes_table);
2042 for (i = 0; i < this->segment_count; i++)
2043 {
2044 this->segments[i].mutex->destroy(this->segments[i].mutex);
2045 this->half_open_segments[i].lock->destroy(this->half_open_segments[i].lock);
2046 this->connected_peers_segments[i].lock->destroy(this->connected_peers_segments[i].lock);
2047 this->init_hashes_segments[i].mutex->destroy(this->init_hashes_segments[i].mutex);
2048 }
2049 free(this->segments);
2050 free(this->half_open_segments);
2051 free(this->connected_peers_segments);
2052 free(this->init_hashes_segments);
2053
2054 free(this);
2055 }
2056
2057 /**
2058 * This function returns the next-highest power of two for the given number.
2059 * The algorithm works by setting all bits on the right-hand side of the most
2060 * significant 1 to 1 and then incrementing the whole number so it rolls over
2061 * to the nearest power of two. Note: returns 0 for n == 0
2062 */
2063 static u_int get_nearest_powerof2(u_int n)
2064 {
2065 u_int i;
2066
2067 --n;
2068 for (i = 1; i < sizeof(u_int) * 8; i <<= 1)
2069 {
2070 n |= n >> i;
2071 }
2072 return ++n;
2073 }
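/*
 * Examples (for illustration only):
 *   get_nearest_powerof2(1)    => 1
 *   get_nearest_powerof2(5)    => 8
 *   get_nearest_powerof2(1000) => 1024
 *   get_nearest_powerof2(1024) => 1024
 *   get_nearest_powerof2(0)    => 0   (see note above)
 */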
2074
2075 /*
2076 * Described in header.
2077 */
2078 ike_sa_manager_t *ike_sa_manager_create()
2079 {
2080 private_ike_sa_manager_t *this;
2081 u_int i;
2082
2083 INIT(this,
2084 .public = {
2085 .checkout = _checkout,
2086 .checkout_new = _checkout_new,
2087 .checkout_by_message = _checkout_by_message,
2088 .checkout_by_config = _checkout_by_config,
2089 .checkout_by_id = _checkout_by_id,
2090 .checkout_by_name = _checkout_by_name,
2091 .check_uniqueness = _check_uniqueness,
2092 .has_contact = _has_contact,
2093 .create_enumerator = _create_enumerator,
2094 .create_id_enumerator = _create_id_enumerator,
2095 .checkin = _checkin,
2096 .checkin_and_destroy = _checkin_and_destroy,
2097 .get_count = _get_count,
2098 .get_half_open_count = _get_half_open_count,
2099 .flush = _flush,
2100 .destroy = _destroy,
2101 },
2102 );
2103
2104 this->hasher = lib->crypto->create_hasher(lib->crypto, HASH_PREFERRED);
2105 if (this->hasher == NULL)
2106 {
2107 DBG1(DBG_MGR, "manager initialization failed, no hasher supported");
2108 free(this);
2109 return NULL;
2110 }
2111 this->rng = lib->crypto->create_rng(lib->crypto, RNG_WEAK);
2112 if (this->rng == NULL)
2113 {
2114 DBG1(DBG_MGR, "manager initialization failed, no RNG supported");
2115 this->hasher->destroy(this->hasher);
2116 free(this);
2117 return NULL;
2118 }
2119
2120 this->ikesa_limit = lib->settings->get_int(lib->settings,
2121 "%s.ikesa_limit", 0, charon->name);
2122
2123 this->table_size = get_nearest_powerof2(lib->settings->get_int(
2124 lib->settings, "%s.ikesa_table_size",
2125 DEFAULT_HASHTABLE_SIZE, charon->name));
2126 this->table_size = max(1, min(this->table_size, MAX_HASHTABLE_SIZE));
2127 this->table_mask = this->table_size - 1;
2128
2129 this->segment_count = get_nearest_powerof2(lib->settings->get_int(
2130 lib->settings, "%s.ikesa_table_segments",
2131 DEFAULT_SEGMENT_COUNT, charon->name));
2132 this->segment_count = max(1, min(this->segment_count, this->table_size));
2133 this->segment_mask = this->segment_count - 1;
2134
2135 this->ike_sa_table = calloc(this->table_size, sizeof(table_item_t*));
2136 this->segments = (segment_t*)calloc(this->segment_count, sizeof(segment_t));
2137 for (i = 0; i < this->segment_count; i++)
2138 {
2139 this->segments[i].mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
2140 this->segments[i].count = 0;
2141 }
2142
2143 /* we use the same table parameters for the table to track half-open SAs */
2144 this->half_open_table = calloc(this->table_size, sizeof(table_item_t*));
2145 this->half_open_segments = calloc(this->segment_count, sizeof(shareable_segment_t));
2146 for (i = 0; i < this->segment_count; i++)
2147 {
2148 this->half_open_segments[i].lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
2149 this->half_open_segments[i].count = 0;
2150 }
2151
2152 /* also for the hash table used for duplicate tests */
2153 this->connected_peers_table = calloc(this->table_size, sizeof(table_item_t*));
2154 this->connected_peers_segments = calloc(this->segment_count, sizeof(shareable_segment_t));
2155 for (i = 0; i < this->segment_count; i++)
2156 {
2157 this->connected_peers_segments[i].lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
2158 this->connected_peers_segments[i].count = 0;
2159 }
2160
2161 /* and again for the table of hashes of seen initial IKE messages */
2162 this->init_hashes_table = calloc(this->table_size, sizeof(table_item_t*));
2163 this->init_hashes_segments = calloc(this->segment_count, sizeof(segment_t));
2164 for (i = 0; i < this->segment_count; i++)
2165 {
2166 this->init_hashes_segments[i].mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
2167 this->init_hashes_segments[i].count = 0;
2168 }
2169
2170 this->reuse_ikesa = lib->settings->get_bool(lib->settings,
2171 "%s.reuse_ikesa", TRUE, charon->name);
2172 return &this->public;
2173 }