2 * Copyright (C) 2005-2011 Martin Willi
3 * Copyright (C) 2011 revosec AG
4 * Copyright (C) 2008-2012 Tobias Brunner
5 * Copyright (C) 2005 Jan Hutter
6 * Hochschule fuer Technik Rapperswil
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
21 #include "ike_sa_manager.h"
24 #include <sa/ike_sa_id.h>
26 #include <threading/condvar.h>
27 #include <threading/mutex.h>
28 #include <threading/rwlock.h>
29 #include <collections/linked_list.h>
30 #include <crypto/hashers/hasher.h>
31 #include <processing/jobs/delete_ike_sa_job.h>
33 /* the default size of the hash table (MUST be a power of 2) */
34 #define DEFAULT_HASHTABLE_SIZE 1
36 /* the maximum size of the hash table (MUST be a power of 2) */
37 #define MAX_HASHTABLE_SIZE (1 << 30)
39 /* the default number of segments (MUST be a power of 2) */
40 #define DEFAULT_SEGMENT_COUNT 1
42 typedef struct entry_t entry_t
;
45 * An entry in the linked list, contains IKE_SA, locking and lookup data.
50 * Number of threads waiting for this ike_sa_t object.
55 * Condvar where threads can wait until ike_sa_t object is free for use again.
60 * Is this ike_sa currently checked out?
65 * Does this SA drive out new threads?
67 bool driveout_new_threads
;
70 * Does this SA drive out waiting threads?
72 bool driveout_waiting_threads
;
75 * Identification of an IKE_SA (SPIs).
77 ike_sa_id_t
*ike_sa_id
;
80 * The contained ike_sa_t object.
85 * hash of the IKE_SA_INIT message, used to detect retransmissions
90 * remote host address, required for DoS detection and duplicate
91 * checking (host with same my_id and other_id is *not* considered
92 * a duplicate if the address family differs)
97 * As responder: Is this SA half-open?
102 * own identity, required for duplicate checking
104 identification_t
*my_id
;
107 * remote identity, required for duplicate checking
109 identification_t
*other_id
;
112 * message ID or hash of currently processing message, -1 if none
114 u_int32_t processing
;
118 * Implementation of entry_t.destroy.
120 static status_t
entry_destroy(entry_t
*this)
122 /* also destroy IKE SA */
123 this->ike_sa
->destroy(this->ike_sa
);
124 this->ike_sa_id
->destroy(this->ike_sa_id
);
125 chunk_free(&this->init_hash
);
126 DESTROY_IF(this->other
);
127 DESTROY_IF(this->my_id
);
128 DESTROY_IF(this->other_id
);
129 this->condvar
->destroy(this->condvar
);
135 * Creates a new entry for the ike_sa_t list.
137 static entry_t
*entry_create()
142 .condvar
= condvar_create(CONDVAR_TYPE_DEFAULT
),
150 * Function that matches entry_t objects by ike_sa_id_t.
152 static bool entry_match_by_id(entry_t
*entry
, ike_sa_id_t
*id
)
154 if (id
->equals(id
, entry
->ike_sa_id
))
158 if ((id
->get_responder_spi(id
) == 0 ||
159 entry
->ike_sa_id
->get_responder_spi(entry
->ike_sa_id
) == 0) &&
160 id
->get_initiator_spi(id
) == entry
->ike_sa_id
->get_initiator_spi(entry
->ike_sa_id
))
162 /* this is TRUE for IKE_SAs that we initiated but have not yet received a response */
169 * Function that matches entry_t objects by ike_sa_t pointers.
171 static bool entry_match_by_sa(entry_t
*entry
, ike_sa_t
*ike_sa
)
173 return entry
->ike_sa
== ike_sa
;
177 * Hash function for ike_sa_id_t objects.
179 static u_int
ike_sa_id_hash(ike_sa_id_t
*ike_sa_id
)
181 /* IKEv2 does not mandate random SPIs (RFC 5996, 2.6), they just have to be
182 * locally unique, so we use our randomly allocated SPI whether we are
183 * initiator or responder to ensure a good distribution. The latter is not
184 * possible for IKEv1 as we don't know whether we are original initiator or
185 * not (based on the IKE header). But as RFC 2408, section 2.5.3 proposes
186 * SPIs (Cookies) to be allocated near random (we allocate them randomly
187 * anyway) it seems safe to always use the initiator SPI. */
188 if (ike_sa_id
->get_ike_version(ike_sa_id
) == IKEV1_MAJOR_VERSION
||
189 ike_sa_id
->is_initiator(ike_sa_id
))
191 return ike_sa_id
->get_initiator_spi(ike_sa_id
);
193 return ike_sa_id
->get_responder_spi(ike_sa_id
);
196 typedef struct half_open_t half_open_t
;
199 * Struct to manage half-open IKE_SAs per peer.
202 /** chunk of remote host address */
205 /** the number of half-open IKE_SAs with that host */
210 * Destroys a half_open_t object.
212 static void half_open_destroy(half_open_t
*this)
214 chunk_free(&this->other
);
218 typedef struct connected_peers_t connected_peers_t
;
220 struct connected_peers_t
{
222 identification_t
*my_id
;
224 /** remote identity */
225 identification_t
*other_id
;
227 /** ip address family of peer */
230 /** list of ike_sa_id_t objects of IKE_SAs between the two identities */
234 static void connected_peers_destroy(connected_peers_t
*this)
236 this->my_id
->destroy(this->my_id
);
237 this->other_id
->destroy(this->other_id
);
238 this->sas
->destroy(this->sas
);
243 * Function that matches connected_peers_t objects by the given ids.
245 static inline bool connected_peers_match(connected_peers_t
*connected_peers
,
246 identification_t
*my_id
, identification_t
*other_id
,
249 return my_id
->equals(my_id
, connected_peers
->my_id
) &&
250 other_id
->equals(other_id
, connected_peers
->other_id
) &&
251 (!family
|| family
== connected_peers
->family
);
254 typedef struct init_hash_t init_hash_t
;
257 /** hash of IKE_SA_INIT or initial phase1 message (data is not cloned) */
260 /** our SPI allocated for the IKE_SA based on this message */
264 typedef struct segment_t segment_t
;
267 * Struct to manage segments of the hash table.
270 /** mutex to access a segment exclusively */
273 /** the number of entries in this segment */
277 typedef struct shareable_segment_t shareable_segment_t
;
280 * Struct to manage segments of the "half-open" and "connected peers" hash tables.
282 struct shareable_segment_t
{
283 /** rwlock to access a segment non-/exclusively */
286 /** the number of entries in this segment - in case of the "half-open table"
287 * it's the sum of all half_open_t.count in a segment. */
291 typedef struct table_item_t table_item_t
;
294 * Instead of using linked_list_t for each bucket we store the data in our own
295 * list to save memory.
297 struct table_item_t
{
298 /** data of this item */
301 /** next item in the overflow list */
305 typedef struct private_ike_sa_manager_t private_ike_sa_manager_t
;
308 * Additional private members of ike_sa_manager_t.
310 struct private_ike_sa_manager_t
{
312 * Public interface of ike_sa_manager_t.
314 ike_sa_manager_t
public;
317 * Hash table with entries for the ike_sa_t objects.
319 table_item_t
**ike_sa_table
;
322 * The size of the hash table.
327 * Mask to map the hashes to table rows.
332 * Segments of the hash table.
337 * The number of segments.
342 * Mask to map a table row to a segment.
347 * Hash table with half_open_t objects.
349 table_item_t
**half_open_table
;
352 * Segments of the "half-open" hash table.
354 shareable_segment_t
*half_open_segments
;
357 * Total number of half-open IKE_SAs.
359 refcount_t half_open_count
;
362 * Hash table with connected_peers_t objects.
364 table_item_t
**connected_peers_table
;
367 * Segments of the "connected peers" hash table.
369 shareable_segment_t
*connected_peers_segments
;
372 * Hash table with init_hash_t objects.
374 table_item_t
**init_hashes_table
;
377 * Segments of the "hashes" hash table.
379 segment_t
*init_hashes_segments
;
382 * RNG to get random SPIs for our side
387 * reuse existing IKE_SAs in checkout_by_config
392 * Configured IKE_SA limit, if any
398 * Acquire a lock to access the segment of the table row with the given index.
399 * It also works with the segment index directly.
401 static inline void lock_single_segment(private_ike_sa_manager_t
*this,
404 mutex_t
*lock
= this->segments
[index
& this->segment_mask
].mutex
;
409 * Release the lock required to access the segment of the table row with the given index.
410 * It also works with the segment index directly.
412 static inline void unlock_single_segment(private_ike_sa_manager_t
*this,
415 mutex_t
*lock
= this->segments
[index
& this->segment_mask
].mutex
;
422 static void lock_all_segments(private_ike_sa_manager_t
*this)
426 for (i
= 0; i
< this->segment_count
; i
++)
428 this->segments
[i
].mutex
->lock(this->segments
[i
].mutex
);
433 * Unlock all segments
435 static void unlock_all_segments(private_ike_sa_manager_t
*this)
439 for (i
= 0; i
< this->segment_count
; i
++)
441 this->segments
[i
].mutex
->unlock(this->segments
[i
].mutex
);
445 typedef struct private_enumerator_t private_enumerator_t
;
448 * hash table enumerator implementation
450 struct private_enumerator_t
{
453 * implements enumerator interface
455 enumerator_t enumerator
;
458 * associated ike_sa_manager_t
460 private_ike_sa_manager_t
*manager
;
463 * current segment index
468 * currently enumerating entry
473 * current table row index
480 table_item_t
*current
;
483 * previous table item
488 METHOD(enumerator_t
, enumerate
, bool,
489 private_enumerator_t
*this, entry_t
**entry
, u_int
*segment
)
493 this->entry
->condvar
->signal(this->entry
->condvar
);
496 while (this->segment
< this->manager
->segment_count
)
498 while (this->row
< this->manager
->table_size
)
500 this->prev
= this->current
;
503 this->current
= this->current
->next
;
507 lock_single_segment(this->manager
, this->segment
);
508 this->current
= this->manager
->ike_sa_table
[this->row
];
512 *entry
= this->entry
= this->current
->value
;
513 *segment
= this->segment
;
516 unlock_single_segment(this->manager
, this->segment
);
517 this->row
+= this->manager
->segment_count
;
520 this->row
= this->segment
;
525 METHOD(enumerator_t
, enumerator_destroy
, void,
526 private_enumerator_t
*this)
530 this->entry
->condvar
->signal(this->entry
->condvar
);
534 unlock_single_segment(this->manager
, this->segment
);
540 * Creates an enumerator to enumerate the entries in the hash table.
542 static enumerator_t
* create_table_enumerator(private_ike_sa_manager_t
*this)
544 private_enumerator_t
*enumerator
;
548 .enumerate
= (void*)_enumerate
,
549 .destroy
= _enumerator_destroy
,
553 return &enumerator
->enumerator
;
557 * Put an entry into the hash table.
558 * Note: The caller has to unlock the returned segment.
560 static u_int
put_entry(private_ike_sa_manager_t
*this, entry_t
*entry
)
562 table_item_t
*current
, *item
;
569 row
= ike_sa_id_hash(entry
->ike_sa_id
) & this->table_mask
;
570 segment
= row
& this->segment_mask
;
572 lock_single_segment(this, segment
);
573 current
= this->ike_sa_table
[row
];
575 { /* insert at the front of current bucket */
576 item
->next
= current
;
578 this->ike_sa_table
[row
] = item
;
579 this->segments
[segment
].count
++;
584 * Remove an entry from the hash table.
585 * Note: The caller MUST have a lock on the segment of this entry.
587 static void remove_entry(private_ike_sa_manager_t
*this, entry_t
*entry
)
589 table_item_t
*item
, *prev
= NULL
;
592 row
= ike_sa_id_hash(entry
->ike_sa_id
) & this->table_mask
;
593 segment
= row
& this->segment_mask
;
594 item
= this->ike_sa_table
[row
];
597 if (item
->value
== entry
)
601 prev
->next
= item
->next
;
605 this->ike_sa_table
[row
] = item
->next
;
607 this->segments
[segment
].count
--;
617 * Remove the entry at the current enumerator position.
619 static void remove_entry_at(private_enumerator_t
*this)
624 table_item_t
*current
= this->current
;
626 this->manager
->segments
[this->segment
].count
--;
627 this->current
= this->prev
;
631 this->prev
->next
= current
->next
;
635 this->manager
->ike_sa_table
[this->row
] = current
->next
;
636 unlock_single_segment(this->manager
, this->segment
);
643 * Find an entry using the provided match function to compare the entries for
646 static status_t
get_entry_by_match_function(private_ike_sa_manager_t
*this,
647 ike_sa_id_t
*ike_sa_id
, entry_t
**entry
, u_int
*segment
,
648 linked_list_match_t match
, void *param
)
653 row
= ike_sa_id_hash(ike_sa_id
) & this->table_mask
;
654 seg
= row
& this->segment_mask
;
656 lock_single_segment(this, seg
);
657 item
= this->ike_sa_table
[row
];
660 if (match(item
->value
, param
))
662 *entry
= item
->value
;
664 /* the locked segment has to be unlocked by the caller */
669 unlock_single_segment(this, seg
);
674 * Find an entry by ike_sa_id_t.
675 * Note: On SUCCESS, the caller has to unlock the segment.
677 static status_t
get_entry_by_id(private_ike_sa_manager_t
*this,
678 ike_sa_id_t
*ike_sa_id
, entry_t
**entry
, u_int
*segment
)
680 return get_entry_by_match_function(this, ike_sa_id
, entry
, segment
,
681 (linked_list_match_t
)entry_match_by_id
, ike_sa_id
);
685 * Find an entry by IKE_SA pointer.
686 * Note: On SUCCESS, the caller has to unlock the segment.
688 static status_t
get_entry_by_sa(private_ike_sa_manager_t
*this,
689 ike_sa_id_t
*ike_sa_id
, ike_sa_t
*ike_sa
, entry_t
**entry
, u_int
*segment
)
691 return get_entry_by_match_function(this, ike_sa_id
, entry
, segment
,
692 (linked_list_match_t
)entry_match_by_sa
, ike_sa
);
696 * Wait until no other thread is using an IKE_SA, return FALSE if entry not
699 static bool wait_for_entry(private_ike_sa_manager_t
*this, entry_t
*entry
,
702 if (entry
->driveout_new_threads
)
704 /* we are not allowed to get this */
707 while (entry
->checked_out
&& !entry
->driveout_waiting_threads
)
709 /* so wait until we can get it for us.
710 * we register us as waiting. */
711 entry
->waiting_threads
++;
712 entry
->condvar
->wait(entry
->condvar
, this->segments
[segment
].mutex
);
713 entry
->waiting_threads
--;
715 /* hm, a deletion request forbids us to get this SA, get next one */
716 if (entry
->driveout_waiting_threads
)
718 /* we must signal here, others may be waiting on it, too */
719 entry
->condvar
->signal(entry
->condvar
);
726 * Put a half-open SA into the hash table.
728 static void put_half_open(private_ike_sa_manager_t
*this, entry_t
*entry
)
733 half_open_t
*half_open
;
736 addr
= entry
->other
->get_address(entry
->other
);
737 row
= chunk_hash(addr
) & this->table_mask
;
738 segment
= row
& this->segment_mask
;
739 lock
= this->half_open_segments
[segment
].lock
;
740 lock
->write_lock(lock
);
741 item
= this->half_open_table
[row
];
744 half_open
= item
->value
;
746 if (chunk_equals(addr
, half_open
->other
))
757 .other
= chunk_clone(addr
),
762 .next
= this->half_open_table
[row
],
764 this->half_open_table
[row
] = item
;
766 this->half_open_segments
[segment
].count
++;
767 ref_get(&this->half_open_count
);
772 * Remove a half-open SA from the hash table.
774 static void remove_half_open(private_ike_sa_manager_t
*this, entry_t
*entry
)
776 table_item_t
*item
, *prev
= NULL
;
781 addr
= entry
->other
->get_address(entry
->other
);
782 row
= chunk_hash(addr
) & this->table_mask
;
783 segment
= row
& this->segment_mask
;
784 lock
= this->half_open_segments
[segment
].lock
;
785 lock
->write_lock(lock
);
786 item
= this->half_open_table
[row
];
789 half_open_t
*half_open
= item
->value
;
791 if (chunk_equals(addr
, half_open
->other
))
793 if (--half_open
->count
== 0)
797 prev
->next
= item
->next
;
801 this->half_open_table
[row
] = item
->next
;
803 half_open_destroy(half_open
);
806 this->half_open_segments
[segment
].count
--;
807 ignore_result(ref_put(&this->half_open_count
));
817 * Put an SA between two peers into the hash table.
819 static void put_connected_peers(private_ike_sa_manager_t
*this, entry_t
*entry
)
824 connected_peers_t
*connected_peers
;
825 chunk_t my_id
, other_id
;
828 my_id
= entry
->my_id
->get_encoding(entry
->my_id
);
829 other_id
= entry
->other_id
->get_encoding(entry
->other_id
);
830 family
= entry
->other
->get_family(entry
->other
);
831 row
= chunk_hash_inc(other_id
, chunk_hash(my_id
)) & this->table_mask
;
832 segment
= row
& this->segment_mask
;
833 lock
= this->connected_peers_segments
[segment
].lock
;
834 lock
->write_lock(lock
);
835 item
= this->connected_peers_table
[row
];
838 connected_peers
= item
->value
;
840 if (connected_peers_match(connected_peers
, entry
->my_id
,
841 entry
->other_id
, family
))
843 if (connected_peers
->sas
->find_first(connected_peers
->sas
,
844 (linked_list_match_t
)entry
->ike_sa_id
->equals
,
845 NULL
, entry
->ike_sa_id
) == SUCCESS
)
857 INIT(connected_peers
,
858 .my_id
= entry
->my_id
->clone(entry
->my_id
),
859 .other_id
= entry
->other_id
->clone(entry
->other_id
),
861 .sas
= linked_list_create(),
864 .value
= connected_peers
,
865 .next
= this->connected_peers_table
[row
],
867 this->connected_peers_table
[row
] = item
;
869 connected_peers
->sas
->insert_last(connected_peers
->sas
,
870 entry
->ike_sa_id
->clone(entry
->ike_sa_id
));
871 this->connected_peers_segments
[segment
].count
++;
876 * Remove an SA between two peers from the hash table.
878 static void remove_connected_peers(private_ike_sa_manager_t
*this, entry_t
*entry
)
880 table_item_t
*item
, *prev
= NULL
;
883 chunk_t my_id
, other_id
;
886 my_id
= entry
->my_id
->get_encoding(entry
->my_id
);
887 other_id
= entry
->other_id
->get_encoding(entry
->other_id
);
888 family
= entry
->other
->get_family(entry
->other
);
890 row
= chunk_hash_inc(other_id
, chunk_hash(my_id
)) & this->table_mask
;
891 segment
= row
& this->segment_mask
;
893 lock
= this->connected_peers_segments
[segment
].lock
;
894 lock
->write_lock(lock
);
895 item
= this->connected_peers_table
[row
];
898 connected_peers_t
*current
= item
->value
;
900 if (connected_peers_match(current
, entry
->my_id
, entry
->other_id
,
903 enumerator_t
*enumerator
;
904 ike_sa_id_t
*ike_sa_id
;
906 enumerator
= current
->sas
->create_enumerator(current
->sas
);
907 while (enumerator
->enumerate(enumerator
, &ike_sa_id
))
909 if (ike_sa_id
->equals(ike_sa_id
, entry
->ike_sa_id
))
911 current
->sas
->remove_at(current
->sas
, enumerator
);
912 ike_sa_id
->destroy(ike_sa_id
);
913 this->connected_peers_segments
[segment
].count
--;
917 enumerator
->destroy(enumerator
);
918 if (current
->sas
->get_count(current
->sas
) == 0)
922 prev
->next
= item
->next
;
926 this->connected_peers_table
[row
] = item
->next
;
928 connected_peers_destroy(current
);
940 * Get a random SPI for new IKE_SAs
942 static u_int64_t
get_spi(private_ike_sa_manager_t
*this)
947 this->rng
->get_bytes(this->rng
, sizeof(spi
), (u_int8_t
*)&spi
))
955 * Calculate the hash of the initial IKE message. Memory for the hash is
956 * allocated on success.
958 * @returns TRUE on success
960 static bool get_init_hash(hasher_t
*hasher
, message_t
*message
, chunk_t
*hash
)
964 if (message
->get_first_payload_type(message
) == PLV1_FRAGMENT
)
965 { /* only hash the source IP, port and SPI for fragmented init messages */
969 src
= message
->get_source(message
);
970 if (!hasher
->allocate_hash(hasher
, src
->get_address(src
), NULL
))
974 port
= src
->get_port(src
);
975 if (!hasher
->allocate_hash(hasher
, chunk_from_thing(port
), NULL
))
979 spi
= message
->get_initiator_spi(message
);
980 return hasher
->allocate_hash(hasher
, chunk_from_thing(spi
), hash
);
982 if (message
->get_exchange_type(message
) == ID_PROT
)
983 { /* include the source for Main Mode as the hash will be the same if
984 * SPIs are reused by two initiators that use the same proposal */
985 src
= message
->get_source(message
);
987 if (!hasher
->allocate_hash(hasher
, src
->get_address(src
), NULL
))
992 return hasher
->allocate_hash(hasher
, message
->get_packet_data(message
), hash
);
996 * Check if we already have created an IKE_SA based on the initial IKE message
997 * with the given hash.
998 * If not the hash is stored, the hash data is not(!) cloned.
1000 * Also, the local SPI is returned. In case of a retransmit this is already
1001 * stored together with the hash, otherwise it is newly allocated and should
1002 * be used to create the IKE_SA.
1004 * @returns ALREADY_DONE if the message with the given hash has been seen before
1005 * NOT_FOUND if the message hash was not found
1006 * FAILED if the SPI allocation failed
1008 static status_t
check_and_put_init_hash(private_ike_sa_manager_t
*this,
1009 chunk_t init_hash
, u_int64_t
*our_spi
)
1017 row
= chunk_hash(init_hash
) & this->table_mask
;
1018 segment
= row
& this->segment_mask
;
1019 mutex
= this->init_hashes_segments
[segment
].mutex
;
1021 item
= this->init_hashes_table
[row
];
1024 init_hash_t
*current
= item
->value
;
1026 if (chunk_equals(init_hash
, current
->hash
))
1028 *our_spi
= current
->our_spi
;
1029 mutex
->unlock(mutex
);
1030 return ALREADY_DONE
;
1035 spi
= get_spi(this);
1043 .len
= init_hash
.len
,
1044 .ptr
= init_hash
.ptr
,
1050 .next
= this->init_hashes_table
[row
],
1052 this->init_hashes_table
[row
] = item
;
1053 *our_spi
= init
->our_spi
;
1054 mutex
->unlock(mutex
);
1059 * Remove the hash of an initial IKE message from the cache.
1061 static void remove_init_hash(private_ike_sa_manager_t
*this, chunk_t init_hash
)
1063 table_item_t
*item
, *prev
= NULL
;
1067 row
= chunk_hash(init_hash
) & this->table_mask
;
1068 segment
= row
& this->segment_mask
;
1069 mutex
= this->init_hashes_segments
[segment
].mutex
;
1071 item
= this->init_hashes_table
[row
];
1074 init_hash_t
*current
= item
->value
;
1076 if (chunk_equals(init_hash
, current
->hash
))
1080 prev
->next
= item
->next
;
1084 this->init_hashes_table
[row
] = item
->next
;
1093 mutex
->unlock(mutex
);
1096 METHOD(ike_sa_manager_t
, checkout
, ike_sa_t
*,
1097 private_ike_sa_manager_t
*this, ike_sa_id_t
*ike_sa_id
)
1099 ike_sa_t
*ike_sa
= NULL
;
1103 DBG2(DBG_MGR
, "checkout IKE_SA");
1105 if (get_entry_by_id(this, ike_sa_id
, &entry
, &segment
) == SUCCESS
)
1107 if (wait_for_entry(this, entry
, segment
))
1109 entry
->checked_out
= TRUE
;
1110 ike_sa
= entry
->ike_sa
;
1111 DBG2(DBG_MGR
, "IKE_SA %s[%u] successfully checked out",
1112 ike_sa
->get_name(ike_sa
), ike_sa
->get_unique_id(ike_sa
));
1114 unlock_single_segment(this, segment
);
1116 charon
->bus
->set_sa(charon
->bus
, ike_sa
);
1120 METHOD(ike_sa_manager_t
, checkout_new
, ike_sa_t
*,
1121 private_ike_sa_manager_t
* this, ike_version_t version
, bool initiator
)
1123 ike_sa_id_t
*ike_sa_id
;
1125 u_int8_t ike_version
;
1128 ike_version
= version
== IKEV1
? IKEV1_MAJOR_VERSION
: IKEV2_MAJOR_VERSION
;
1130 spi
= get_spi(this);
1133 DBG1(DBG_MGR
, "failed to allocate SPI for new IKE_SA");
1139 ike_sa_id
= ike_sa_id_create(ike_version
, spi
, 0, TRUE
);
1143 ike_sa_id
= ike_sa_id_create(ike_version
, 0, spi
, FALSE
);
1145 ike_sa
= ike_sa_create(ike_sa_id
, initiator
, version
);
1146 ike_sa_id
->destroy(ike_sa_id
);
1150 DBG2(DBG_MGR
, "created IKE_SA %s[%u]", ike_sa
->get_name(ike_sa
),
1151 ike_sa
->get_unique_id(ike_sa
));
1157 * Get the message ID or message hash to detect early retransmissions
1159 static u_int32_t
get_message_id_or_hash(message_t
*message
)
1161 /* Use the message ID, or the message hash in IKEv1 Main/Aggressive mode */
1162 if (message
->get_major_version(message
) == IKEV1_MAJOR_VERSION
&&
1163 message
->get_message_id(message
) == 0)
1165 return chunk_hash(message
->get_packet_data(message
));
1167 return message
->get_message_id(message
);
1170 METHOD(ike_sa_manager_t
, checkout_by_message
, ike_sa_t
*,
1171 private_ike_sa_manager_t
* this, message_t
*message
)
1175 ike_sa_t
*ike_sa
= NULL
;
1177 ike_version_t ike_version
;
1178 bool is_init
= FALSE
;
1180 id
= message
->get_ike_sa_id(message
);
1181 /* clone the IKE_SA ID so we can modify the initiator flag */
1183 id
->switch_initiator(id
);
1185 DBG2(DBG_MGR
, "checkout IKE_SA by message");
1187 if (id
->get_responder_spi(id
) == 0)
1189 if (message
->get_major_version(message
) == IKEV2_MAJOR_VERSION
)
1191 if (message
->get_exchange_type(message
) == IKE_SA_INIT
&&
1192 message
->get_request(message
))
1194 ike_version
= IKEV2
;
1200 if (message
->get_exchange_type(message
) == ID_PROT
||
1201 message
->get_exchange_type(message
) == AGGRESSIVE
)
1203 ike_version
= IKEV1
;
1205 if (id
->is_initiator(id
))
1206 { /* not set in IKEv1, switch back before applying to new SA */
1207 id
->switch_initiator(id
);
1219 hasher
= lib
->crypto
->create_hasher(lib
->crypto
, HASH_SHA1
);
1220 if (!hasher
|| !get_init_hash(hasher
, message
, &hash
))
1222 DBG1(DBG_MGR
, "ignoring message, failed to hash message");
1227 hasher
->destroy(hasher
);
1229 /* ensure this is not a retransmit of an already handled init message */
1230 switch (check_and_put_init_hash(this, hash
, &our_spi
))
1233 { /* we've not seen this packet yet, create a new IKE_SA */
1234 if (!this->ikesa_limit
||
1235 this->public.get_count(&this->public) < this->ikesa_limit
)
1237 id
->set_responder_spi(id
, our_spi
);
1238 ike_sa
= ike_sa_create(id
, FALSE
, ike_version
);
1241 entry
= entry_create();
1242 entry
->ike_sa
= ike_sa
;
1243 entry
->ike_sa_id
= id
;
1245 segment
= put_entry(this, entry
);
1246 entry
->checked_out
= TRUE
;
1247 unlock_single_segment(this, segment
);
1249 entry
->processing
= get_message_id_or_hash(message
);
1250 entry
->init_hash
= hash
;
1252 DBG2(DBG_MGR
, "created IKE_SA %s[%u]",
1253 ike_sa
->get_name(ike_sa
),
1254 ike_sa
->get_unique_id(ike_sa
));
1256 charon
->bus
->set_sa(charon
->bus
, ike_sa
);
1261 DBG1(DBG_MGR
, "creating IKE_SA failed, ignoring message");
1266 DBG1(DBG_MGR
, "ignoring %N, hitting IKE_SA limit (%u)",
1267 exchange_type_names
, message
->get_exchange_type(message
),
1270 remove_init_hash(this, hash
);
1276 { /* we failed to allocate an SPI */
1279 DBG1(DBG_MGR
, "ignoring message, failed to allocate SPI");
1286 /* it looks like we already handled this init message to some degree */
1287 id
->set_responder_spi(id
, our_spi
);
1291 if (get_entry_by_id(this, id
, &entry
, &segment
) == SUCCESS
)
1293 /* only check out if we are not already processing it. */
1294 if (entry
->processing
== get_message_id_or_hash(message
))
1296 DBG1(DBG_MGR
, "ignoring request with ID %u, already processing",
1299 else if (wait_for_entry(this, entry
, segment
))
1301 ike_sa_id_t
*ike_id
;
1303 ike_id
= entry
->ike_sa
->get_id(entry
->ike_sa
);
1304 entry
->checked_out
= TRUE
;
1305 if (message
->get_first_payload_type(message
) != PLV1_FRAGMENT
&&
1306 message
->get_first_payload_type(message
) != PLV2_FRAGMENT
)
1307 { /* TODO-FRAG: this fails if there are unencrypted payloads */
1308 entry
->processing
= get_message_id_or_hash(message
);
1310 if (ike_id
->get_responder_spi(ike_id
) == 0)
1312 ike_id
->set_responder_spi(ike_id
, id
->get_responder_spi(id
));
1314 ike_sa
= entry
->ike_sa
;
1315 DBG2(DBG_MGR
, "IKE_SA %s[%u] successfully checked out",
1316 ike_sa
->get_name(ike_sa
), ike_sa
->get_unique_id(ike_sa
));
1318 unlock_single_segment(this, segment
);
1322 charon
->bus
->alert(charon
->bus
, ALERT_INVALID_IKE_SPI
, message
);
1325 charon
->bus
->set_sa(charon
->bus
, ike_sa
);
1329 METHOD(ike_sa_manager_t
, checkout_by_config
, ike_sa_t
*,
1330 private_ike_sa_manager_t
*this, peer_cfg_t
*peer_cfg
)
1332 enumerator_t
*enumerator
;
1334 ike_sa_t
*ike_sa
= NULL
;
1335 peer_cfg_t
*current_peer
;
1336 ike_cfg_t
*current_ike
;
1339 DBG2(DBG_MGR
, "checkout IKE_SA by config");
1341 if (!this->reuse_ikesa
)
1342 { /* IKE_SA reuse disabled by config */
1343 ike_sa
= checkout_new(this, peer_cfg
->get_ike_version(peer_cfg
), TRUE
);
1344 charon
->bus
->set_sa(charon
->bus
, ike_sa
);
1348 enumerator
= create_table_enumerator(this);
1349 while (enumerator
->enumerate(enumerator
, &entry
, &segment
))
1351 if (!wait_for_entry(this, entry
, segment
))
1355 if (entry
->ike_sa
->get_state(entry
->ike_sa
) == IKE_DELETING
)
1356 { /* skip IKE_SAs which are not usable */
1360 current_peer
= entry
->ike_sa
->get_peer_cfg(entry
->ike_sa
);
1361 if (current_peer
&& current_peer
->equals(current_peer
, peer_cfg
))
1363 current_ike
= current_peer
->get_ike_cfg(current_peer
);
1364 if (current_ike
->equals(current_ike
, peer_cfg
->get_ike_cfg(peer_cfg
)))
1366 entry
->checked_out
= TRUE
;
1367 ike_sa
= entry
->ike_sa
;
1368 DBG2(DBG_MGR
, "found existing IKE_SA %u with a '%s' config",
1369 ike_sa
->get_unique_id(ike_sa
),
1370 current_peer
->get_name(current_peer
));
1375 enumerator
->destroy(enumerator
);
1378 { /* no IKE_SA using such a config, hand out a new */
1379 ike_sa
= checkout_new(this, peer_cfg
->get_ike_version(peer_cfg
), TRUE
);
1381 charon
->bus
->set_sa(charon
->bus
, ike_sa
);
1385 METHOD(ike_sa_manager_t
, checkout_by_id
, ike_sa_t
*,
1386 private_ike_sa_manager_t
*this, u_int32_t id
)
1388 enumerator_t
*enumerator
;
1390 ike_sa_t
*ike_sa
= NULL
;
1393 DBG2(DBG_MGR
, "checkout IKE_SA by ID %u", id
);
1395 enumerator
= create_table_enumerator(this);
1396 while (enumerator
->enumerate(enumerator
, &entry
, &segment
))
1398 if (wait_for_entry(this, entry
, segment
))
1400 if (entry
->ike_sa
->get_unique_id(entry
->ike_sa
) == id
)
1402 ike_sa
= entry
->ike_sa
;
1403 entry
->checked_out
= TRUE
;
1408 enumerator
->destroy(enumerator
);
1412 DBG2(DBG_MGR
, "IKE_SA %s[%u] successfully checked out",
1413 ike_sa
->get_name(ike_sa
), ike_sa
->get_unique_id(ike_sa
));
1415 charon
->bus
->set_sa(charon
->bus
, ike_sa
);
1419 METHOD(ike_sa_manager_t
, checkout_by_name
, ike_sa_t
*,
1420 private_ike_sa_manager_t
*this, char *name
, bool child
)
1422 enumerator_t
*enumerator
, *children
;
1424 ike_sa_t
*ike_sa
= NULL
;
1425 child_sa_t
*child_sa
;
1428 enumerator
= create_table_enumerator(this);
1429 while (enumerator
->enumerate(enumerator
, &entry
, &segment
))
1431 if (wait_for_entry(this, entry
, segment
))
1433 /* look for a child with such a policy name ... */
1436 children
= entry
->ike_sa
->create_child_sa_enumerator(entry
->ike_sa
);
1437 while (children
->enumerate(children
, (void**)&child_sa
))
1439 if (streq(child_sa
->get_name(child_sa
), name
))
1441 ike_sa
= entry
->ike_sa
;
1445 children
->destroy(children
);
1447 else /* ... or for a IKE_SA with such a connection name */
1449 if (streq(entry
->ike_sa
->get_name(entry
->ike_sa
), name
))
1451 ike_sa
= entry
->ike_sa
;
1454 /* got one, return */
1457 entry
->checked_out
= TRUE
;
1458 DBG2(DBG_MGR
, "IKE_SA %s[%u] successfully checked out",
1459 ike_sa
->get_name(ike_sa
), ike_sa
->get_unique_id(ike_sa
));
1464 enumerator
->destroy(enumerator
);
1466 charon
->bus
->set_sa(charon
->bus
, ike_sa
);
1471 * enumerator filter function, waiting variant
1473 static bool enumerator_filter_wait(private_ike_sa_manager_t
*this,
1474 entry_t
**in
, ike_sa_t
**out
, u_int
*segment
)
1476 if (wait_for_entry(this, *in
, *segment
))
1478 *out
= (*in
)->ike_sa
;
1479 charon
->bus
->set_sa(charon
->bus
, *out
);
1486 * enumerator filter function, skipping variant
1488 static bool enumerator_filter_skip(private_ike_sa_manager_t
*this,
1489 entry_t
**in
, ike_sa_t
**out
, u_int
*segment
)
1491 if (!(*in
)->driveout_new_threads
&&
1492 !(*in
)->driveout_waiting_threads
&&
1493 !(*in
)->checked_out
)
1495 *out
= (*in
)->ike_sa
;
1496 charon
->bus
->set_sa(charon
->bus
, *out
);
1503 * Reset threads SA after enumeration
1505 static void reset_sa(void *data
)
1507 charon
->bus
->set_sa(charon
->bus
, NULL
);
1510 METHOD(ike_sa_manager_t
, create_enumerator
, enumerator_t
*,
1511 private_ike_sa_manager_t
* this, bool wait
)
1513 return enumerator_create_filter(create_table_enumerator(this),
1514 wait
? (void*)enumerator_filter_wait
: (void*)enumerator_filter_skip
,
1518 METHOD(ike_sa_manager_t
, checkin
, void,
1519 private_ike_sa_manager_t
*this, ike_sa_t
*ike_sa
)
1521 /* to check the SA back in, we look for the pointer of the ike_sa
1523 * The lookup is done by initiator SPI, so even if the SPI has changed (e.g.
1524 * on reception of a IKE_SA_INIT response) the lookup will work but
1525 * updating of the SPI MAY be necessary...
1528 ike_sa_id_t
*ike_sa_id
;
1530 identification_t
*my_id
, *other_id
;
1533 ike_sa_id
= ike_sa
->get_id(ike_sa
);
1534 my_id
= ike_sa
->get_my_id(ike_sa
);
1535 other_id
= ike_sa
->get_other_eap_id(ike_sa
);
1536 other
= ike_sa
->get_other_host(ike_sa
);
1538 DBG2(DBG_MGR
, "checkin IKE_SA %s[%u]", ike_sa
->get_name(ike_sa
),
1539 ike_sa
->get_unique_id(ike_sa
));
1541 /* look for the entry */
1542 if (get_entry_by_sa(this, ike_sa_id
, ike_sa
, &entry
, &segment
) == SUCCESS
)
1544 /* ike_sa_id must be updated */
1545 entry
->ike_sa_id
->replace_values(entry
->ike_sa_id
, ike_sa
->get_id(ike_sa
));
1546 /* signal waiting threads */
1547 entry
->checked_out
= FALSE
;
1548 entry
->processing
= -1;
1549 /* check if this SA is half-open */
1550 if (entry
->half_open
&& ike_sa
->get_state(ike_sa
) != IKE_CONNECTING
)
1552 /* not half open anymore */
1553 entry
->half_open
= FALSE
;
1554 remove_half_open(this, entry
);
1556 else if (entry
->half_open
&& !other
->ip_equals(other
, entry
->other
))
1558 /* the other host's IP has changed, we must update the hash table */
1559 remove_half_open(this, entry
);
1560 DESTROY_IF(entry
->other
);
1561 entry
->other
= other
->clone(other
);
1562 put_half_open(this, entry
);
1564 else if (!entry
->half_open
&&
1565 !entry
->ike_sa_id
->is_initiator(entry
->ike_sa_id
) &&
1566 ike_sa
->get_state(ike_sa
) == IKE_CONNECTING
)
1568 /* this is a new half-open SA */
1569 entry
->half_open
= TRUE
;
1570 entry
->other
= other
->clone(other
);
1571 put_half_open(this, entry
);
1573 DBG2(DBG_MGR
, "check-in of IKE_SA successful.");
1574 entry
->condvar
->signal(entry
->condvar
);
1578 entry
= entry_create();
1579 entry
->ike_sa_id
= ike_sa_id
->clone(ike_sa_id
);
1580 entry
->ike_sa
= ike_sa
;
1581 segment
= put_entry(this, entry
);
1584 /* apply identities for duplicate test */
1585 if ((ike_sa
->get_state(ike_sa
) == IKE_ESTABLISHED
||
1586 ike_sa
->get_state(ike_sa
) == IKE_PASSIVE
) &&
1587 entry
->my_id
== NULL
&& entry
->other_id
== NULL
)
1589 if (ike_sa
->get_version(ike_sa
) == IKEV1
)
1591 /* If authenticated and received INITIAL_CONTACT,
1592 * delete any existing IKE_SAs with that peer. */
1593 if (ike_sa
->has_condition(ike_sa
, COND_INIT_CONTACT_SEEN
))
1595 this->public.check_uniqueness(&this->public, ike_sa
, TRUE
);
1596 ike_sa
->set_condition(ike_sa
, COND_INIT_CONTACT_SEEN
, FALSE
);
1600 entry
->my_id
= my_id
->clone(my_id
);
1601 entry
->other_id
= other_id
->clone(other_id
);
1604 entry
->other
= other
->clone(other
);
1606 put_connected_peers(this, entry
);
1609 unlock_single_segment(this, segment
);
1611 charon
->bus
->set_sa(charon
->bus
, NULL
);
1614 METHOD(ike_sa_manager_t
, checkin_and_destroy
, void,
1615 private_ike_sa_manager_t
*this, ike_sa_t
*ike_sa
)
1617 /* deletion is a bit complex, we must ensure that no thread is waiting for
1619 * We take this SA from the table, and start signaling while threads
1620 * are in the condvar.
1623 ike_sa_id_t
*ike_sa_id
;
1626 ike_sa_id
= ike_sa
->get_id(ike_sa
);
1628 DBG2(DBG_MGR
, "checkin and destroy IKE_SA %s[%u]", ike_sa
->get_name(ike_sa
),
1629 ike_sa
->get_unique_id(ike_sa
));
1631 if (get_entry_by_sa(this, ike_sa_id
, ike_sa
, &entry
, &segment
) == SUCCESS
)
1633 if (entry
->driveout_waiting_threads
&& entry
->driveout_new_threads
)
1634 { /* it looks like flush() has been called and the SA is being deleted
1635 * anyway, just check it in */
1636 DBG2(DBG_MGR
, "ignored check-in and destroy of IKE_SA during shutdown");
1637 entry
->checked_out
= FALSE
;
1638 entry
->condvar
->broadcast(entry
->condvar
);
1639 unlock_single_segment(this, segment
);
1643 /* drive out waiting threads, as we are in hurry */
1644 entry
->driveout_waiting_threads
= TRUE
;
1645 /* mark it, so no new threads can get this entry */
1646 entry
->driveout_new_threads
= TRUE
;
1647 /* wait until all workers have done their work */
1648 while (entry
->waiting_threads
)
1651 entry
->condvar
->broadcast(entry
->condvar
);
1652 /* they will wake us again when their work is done */
1653 entry
->condvar
->wait(entry
->condvar
, this->segments
[segment
].mutex
);
1655 remove_entry(this, entry
);
1656 unlock_single_segment(this, segment
);
1658 if (entry
->half_open
)
1660 remove_half_open(this, entry
);
1662 if (entry
->my_id
&& entry
->other_id
)
1664 remove_connected_peers(this, entry
);
1666 if (entry
->init_hash
.ptr
)
1668 remove_init_hash(this, entry
->init_hash
);
1671 entry_destroy(entry
);
1673 DBG2(DBG_MGR
, "check-in and destroy of IKE_SA successful");
1677 DBG1(DBG_MGR
, "tried to check-in and delete nonexisting IKE_SA");
1678 ike_sa
->destroy(ike_sa
);
1680 charon
->bus
->set_sa(charon
->bus
, NULL
);
1684 * Cleanup function for create_id_enumerator
1686 static void id_enumerator_cleanup(linked_list_t
*ids
)
1688 ids
->destroy_offset(ids
, offsetof(ike_sa_id_t
, destroy
));
1691 METHOD(ike_sa_manager_t
, create_id_enumerator
, enumerator_t
*,
1692 private_ike_sa_manager_t
*this, identification_t
*me
,
1693 identification_t
*other
, int family
)
1698 linked_list_t
*ids
= NULL
;
1700 row
= chunk_hash_inc(other
->get_encoding(other
),
1701 chunk_hash(me
->get_encoding(me
))) & this->table_mask
;
1702 segment
= row
& this->segment_mask
;
1704 lock
= this->connected_peers_segments
[segment
].lock
;
1705 lock
->read_lock(lock
);
1706 item
= this->connected_peers_table
[row
];
1709 connected_peers_t
*current
= item
->value
;
1711 if (connected_peers_match(current
, me
, other
, family
))
1713 ids
= current
->sas
->clone_offset(current
->sas
,
1714 offsetof(ike_sa_id_t
, clone
));
1723 return enumerator_create_empty();
1725 return enumerator_create_cleaner(ids
->create_enumerator(ids
),
1726 (void*)id_enumerator_cleanup
, ids
);
1730 * Move all CHILD_SAs from old to new
1732 static void adopt_children(ike_sa_t
*old
, ike_sa_t
*new)
1734 enumerator_t
*enumerator
;
1735 child_sa_t
*child_sa
;
1737 enumerator
= old
->create_child_sa_enumerator(old
);
1738 while (enumerator
->enumerate(enumerator
, &child_sa
))
1740 old
->remove_child_sa(old
, enumerator
);
1741 new->add_child_sa(new, child_sa
);
1743 enumerator
->destroy(enumerator
);
1747 * Check if the replaced IKE_SA might get reauthenticated from host
1749 static bool is_ikev1_reauth(ike_sa_t
*duplicate
, host_t
*host
)
1751 return duplicate
->get_version(duplicate
) == IKEV1
&&
1752 host
->equals(host
, duplicate
->get_other_host(duplicate
));
1756 * Delete an existing IKE_SA due to a unique replace policy
1758 static status_t
enforce_replace(private_ike_sa_manager_t
*this,
1759 ike_sa_t
*duplicate
, ike_sa_t
*new,
1760 identification_t
*other
, host_t
*host
)
1762 charon
->bus
->alert(charon
->bus
, ALERT_UNIQUE_REPLACE
);
1764 if (is_ikev1_reauth(duplicate
, host
))
1766 /* looks like a reauthentication attempt */
1767 if (!new->has_condition(new, COND_INIT_CONTACT_SEEN
))
1769 adopt_children(duplicate
, new);
1771 /* For IKEv1 we have to delay the delete for the old IKE_SA. Some
1772 * peers need to complete the new SA first, otherwise the quick modes
1773 * might get lost. */
1774 lib
->scheduler
->schedule_job(lib
->scheduler
, (job_t
*)
1775 delete_ike_sa_job_create(duplicate
->get_id(duplicate
), TRUE
), 10);
1778 DBG1(DBG_IKE
, "deleting duplicate IKE_SA for peer '%Y' due to "
1779 "uniqueness policy", other
);
1780 return duplicate
->delete(duplicate
);
1783 METHOD(ike_sa_manager_t
, check_uniqueness
, bool,
1784 private_ike_sa_manager_t
*this, ike_sa_t
*ike_sa
, bool force_replace
)
1786 bool cancel
= FALSE
;
1787 peer_cfg_t
*peer_cfg
;
1788 unique_policy_t policy
;
1789 enumerator_t
*enumerator
;
1790 ike_sa_id_t
*id
= NULL
;
1791 identification_t
*me
, *other
;
1794 peer_cfg
= ike_sa
->get_peer_cfg(ike_sa
);
1795 policy
= peer_cfg
->get_unique_policy(peer_cfg
);
1796 if (policy
== UNIQUE_NEVER
|| (policy
== UNIQUE_NO
&& !force_replace
))
1800 me
= ike_sa
->get_my_id(ike_sa
);
1801 other
= ike_sa
->get_other_eap_id(ike_sa
);
1802 other_host
= ike_sa
->get_other_host(ike_sa
);
1804 enumerator
= create_id_enumerator(this, me
, other
,
1805 other_host
->get_family(other_host
));
1806 while (enumerator
->enumerate(enumerator
, &id
))
1808 status_t status
= SUCCESS
;
1809 ike_sa_t
*duplicate
;
1811 duplicate
= checkout(this, id
);
1818 DBG1(DBG_IKE
, "destroying duplicate IKE_SA for peer '%Y', "
1819 "received INITIAL_CONTACT", other
);
1820 charon
->bus
->ike_updown(charon
->bus
, duplicate
, FALSE
);
1821 checkin_and_destroy(this, duplicate
);
1824 peer_cfg
= duplicate
->get_peer_cfg(duplicate
);
1825 if (peer_cfg
&& peer_cfg
->equals(peer_cfg
, ike_sa
->get_peer_cfg(ike_sa
)))
1827 switch (duplicate
->get_state(duplicate
))
1829 case IKE_ESTABLISHED
:
1833 case UNIQUE_REPLACE
:
1834 status
= enforce_replace(this, duplicate
, ike_sa
,
1838 if (!is_ikev1_reauth(duplicate
, other_host
))
1841 /* we keep the first IKE_SA and delete all
1842 * other duplicates that might exist */
1843 policy
= UNIQUE_REPLACE
;
1854 if (status
== DESTROY_ME
)
1856 checkin_and_destroy(this, duplicate
);
1860 checkin(this, duplicate
);
1863 enumerator
->destroy(enumerator
);
1864 /* reset thread's current IKE_SA after checkin */
1865 charon
->bus
->set_sa(charon
->bus
, ike_sa
);
1869 METHOD(ike_sa_manager_t
, has_contact
, bool,
1870 private_ike_sa_manager_t
*this, identification_t
*me
,
1871 identification_t
*other
, int family
)
1878 row
= chunk_hash_inc(other
->get_encoding(other
),
1879 chunk_hash(me
->get_encoding(me
))) & this->table_mask
;
1880 segment
= row
& this->segment_mask
;
1881 lock
= this->connected_peers_segments
[segment
].lock
;
1882 lock
->read_lock(lock
);
1883 item
= this->connected_peers_table
[row
];
1886 if (connected_peers_match(item
->value
, me
, other
, family
))
1898 METHOD(ike_sa_manager_t
, get_count
, u_int
,
1899 private_ike_sa_manager_t
*this)
1901 u_int segment
, count
= 0;
1904 for (segment
= 0; segment
< this->segment_count
; segment
++)
1906 mutex
= this->segments
[segment
& this->segment_mask
].mutex
;
1908 count
+= this->segments
[segment
].count
;
1909 mutex
->unlock(mutex
);
1914 METHOD(ike_sa_manager_t
, get_half_open_count
, u_int
,
1915 private_ike_sa_manager_t
*this, host_t
*ip
)
1925 addr
= ip
->get_address(ip
);
1926 row
= chunk_hash(addr
) & this->table_mask
;
1927 segment
= row
& this->segment_mask
;
1928 lock
= this->half_open_segments
[segment
].lock
;
1929 lock
->read_lock(lock
);
1930 item
= this->half_open_table
[row
];
1933 half_open_t
*half_open
= item
->value
;
1935 if (chunk_equals(addr
, half_open
->other
))
1937 count
= half_open
->count
;
1946 count
= (u_int
)ref_cur(&this->half_open_count
);
1951 METHOD(ike_sa_manager_t
, flush
, void,
1952 private_ike_sa_manager_t
*this)
1954 /* destroy all list entries */
1955 enumerator_t
*enumerator
;
1959 lock_all_segments(this);
1960 DBG2(DBG_MGR
, "going to destroy IKE_SA manager and all managed IKE_SA's");
1961 /* Step 1: drive out all waiting threads */
1962 DBG2(DBG_MGR
, "set driveout flags for all stored IKE_SA's");
1963 enumerator
= create_table_enumerator(this);
1964 while (enumerator
->enumerate(enumerator
, &entry
, &segment
))
1966 /* do not accept new threads, drive out waiting threads */
1967 entry
->driveout_new_threads
= TRUE
;
1968 entry
->driveout_waiting_threads
= TRUE
;
1970 enumerator
->destroy(enumerator
);
1971 DBG2(DBG_MGR
, "wait for all threads to leave IKE_SA's");
1972 /* Step 2: wait until all are gone */
1973 enumerator
= create_table_enumerator(this);
1974 while (enumerator
->enumerate(enumerator
, &entry
, &segment
))
1976 while (entry
->waiting_threads
|| entry
->checked_out
)
1979 entry
->condvar
->broadcast(entry
->condvar
);
1980 /* go sleeping until they are gone */
1981 entry
->condvar
->wait(entry
->condvar
, this->segments
[segment
].mutex
);
1984 enumerator
->destroy(enumerator
);
1985 DBG2(DBG_MGR
, "delete all IKE_SA's");
1986 /* Step 3: initiate deletion of all IKE_SAs */
1987 enumerator
= create_table_enumerator(this);
1988 while (enumerator
->enumerate(enumerator
, &entry
, &segment
))
1990 charon
->bus
->set_sa(charon
->bus
, entry
->ike_sa
);
1991 if (entry
->ike_sa
->get_version(entry
->ike_sa
) == IKEV2
)
1992 { /* as the delete never gets processed, fire down events */
1993 switch (entry
->ike_sa
->get_state(entry
->ike_sa
))
1995 case IKE_ESTABLISHED
:
1998 charon
->bus
->ike_updown(charon
->bus
, entry
->ike_sa
, FALSE
);
2004 entry
->ike_sa
->delete(entry
->ike_sa
);
2006 enumerator
->destroy(enumerator
);
2008 DBG2(DBG_MGR
, "destroy all entries");
2009 /* Step 4: destroy all entries */
2010 enumerator
= create_table_enumerator(this);
2011 while (enumerator
->enumerate(enumerator
, &entry
, &segment
))
2013 charon
->bus
->set_sa(charon
->bus
, entry
->ike_sa
);
2014 if (entry
->half_open
)
2016 remove_half_open(this, entry
);
2018 if (entry
->my_id
&& entry
->other_id
)
2020 remove_connected_peers(this, entry
);
2022 if (entry
->init_hash
.ptr
)
2024 remove_init_hash(this, entry
->init_hash
);
2026 remove_entry_at((private_enumerator_t
*)enumerator
);
2027 entry_destroy(entry
);
2029 enumerator
->destroy(enumerator
);
2030 charon
->bus
->set_sa(charon
->bus
, NULL
);
2031 unlock_all_segments(this);
2033 this->rng
->destroy(this->rng
);
2037 METHOD(ike_sa_manager_t
, destroy
, void,
2038 private_ike_sa_manager_t
*this)
2042 /* these are already cleared in flush() above */
2043 free(this->ike_sa_table
);
2044 free(this->half_open_table
);
2045 free(this->connected_peers_table
);
2046 free(this->init_hashes_table
);
2047 for (i
= 0; i
< this->segment_count
; i
++)
2049 this->segments
[i
].mutex
->destroy(this->segments
[i
].mutex
);
2050 this->half_open_segments
[i
].lock
->destroy(this->half_open_segments
[i
].lock
);
2051 this->connected_peers_segments
[i
].lock
->destroy(this->connected_peers_segments
[i
].lock
);
2052 this->init_hashes_segments
[i
].mutex
->destroy(this->init_hashes_segments
[i
].mutex
);
2054 free(this->segments
);
2055 free(this->half_open_segments
);
2056 free(this->connected_peers_segments
);
2057 free(this->init_hashes_segments
);
/**
 * This function returns the next-highest power of two for the given number.
 * The algorithm works by setting all bits on the right-hand side of the most
 * significant 1 to 1 and then increments the whole number so it rolls over
 * to the nearest power of two. Note: returns 0 for n == 0
 *
 * @param n		number to round up
 * @return		smallest power of two >= n (0 for n == 0, by wrap-around)
 */
static u_int get_nearest_powerof2(u_int n)
{
	u_int i;

	--n;
	/* smear the highest set bit into all lower positions */
	for (i = 1; i < sizeof(u_int) * 8; i <<= 1)
	{
		n |= n >> i;
	}
	return ++n;
}
2081 * Described in header.
2083 ike_sa_manager_t
*ike_sa_manager_create()
2085 private_ike_sa_manager_t
*this;
2090 .checkout
= _checkout
,
2091 .checkout_new
= _checkout_new
,
2092 .checkout_by_message
= _checkout_by_message
,
2093 .checkout_by_config
= _checkout_by_config
,
2094 .checkout_by_id
= _checkout_by_id
,
2095 .checkout_by_name
= _checkout_by_name
,
2096 .check_uniqueness
= _check_uniqueness
,
2097 .has_contact
= _has_contact
,
2098 .create_enumerator
= _create_enumerator
,
2099 .create_id_enumerator
= _create_id_enumerator
,
2100 .checkin
= _checkin
,
2101 .checkin_and_destroy
= _checkin_and_destroy
,
2102 .get_count
= _get_count
,
2103 .get_half_open_count
= _get_half_open_count
,
2105 .destroy
= _destroy
,
2109 this->rng
= lib
->crypto
->create_rng(lib
->crypto
, RNG_WEAK
);
2110 if (this->rng
== NULL
)
2112 DBG1(DBG_MGR
, "manager initialization failed, no RNG supported");
2117 this->ikesa_limit
= lib
->settings
->get_int(lib
->settings
,
2118 "%s.ikesa_limit", 0, lib
->ns
);
2120 this->table_size
= get_nearest_powerof2(lib
->settings
->get_int(
2121 lib
->settings
, "%s.ikesa_table_size",
2122 DEFAULT_HASHTABLE_SIZE
, lib
->ns
));
2123 this->table_size
= max(1, min(this->table_size
, MAX_HASHTABLE_SIZE
));
2124 this->table_mask
= this->table_size
- 1;
2126 this->segment_count
= get_nearest_powerof2(lib
->settings
->get_int(
2127 lib
->settings
, "%s.ikesa_table_segments",
2128 DEFAULT_SEGMENT_COUNT
, lib
->ns
));
2129 this->segment_count
= max(1, min(this->segment_count
, this->table_size
));
2130 this->segment_mask
= this->segment_count
- 1;
2132 this->ike_sa_table
= calloc(this->table_size
, sizeof(table_item_t
*));
2133 this->segments
= (segment_t
*)calloc(this->segment_count
, sizeof(segment_t
));
2134 for (i
= 0; i
< this->segment_count
; i
++)
2136 this->segments
[i
].mutex
= mutex_create(MUTEX_TYPE_RECURSIVE
);
2137 this->segments
[i
].count
= 0;
2140 /* we use the same table parameters for the table to track half-open SAs */
2141 this->half_open_table
= calloc(this->table_size
, sizeof(table_item_t
*));
2142 this->half_open_segments
= calloc(this->segment_count
, sizeof(shareable_segment_t
));
2143 for (i
= 0; i
< this->segment_count
; i
++)
2145 this->half_open_segments
[i
].lock
= rwlock_create(RWLOCK_TYPE_DEFAULT
);
2146 this->half_open_segments
[i
].count
= 0;
2149 /* also for the hash table used for duplicate tests */
2150 this->connected_peers_table
= calloc(this->table_size
, sizeof(table_item_t
*));
2151 this->connected_peers_segments
= calloc(this->segment_count
, sizeof(shareable_segment_t
));
2152 for (i
= 0; i
< this->segment_count
; i
++)
2154 this->connected_peers_segments
[i
].lock
= rwlock_create(RWLOCK_TYPE_DEFAULT
);
2155 this->connected_peers_segments
[i
].count
= 0;
2158 /* and again for the table of hashes of seen initial IKE messages */
2159 this->init_hashes_table
= calloc(this->table_size
, sizeof(table_item_t
*));
2160 this->init_hashes_segments
= calloc(this->segment_count
, sizeof(segment_t
));
2161 for (i
= 0; i
< this->segment_count
; i
++)
2163 this->init_hashes_segments
[i
].mutex
= mutex_create(MUTEX_TYPE_RECURSIVE
);
2164 this->init_hashes_segments
[i
].count
= 0;
2167 this->reuse_ikesa
= lib
->settings
->get_bool(lib
->settings
,
2168 "%s.reuse_ikesa", TRUE
, lib
->ns
);
2169 return &this->public;