2 * Copyright (C) 2005-2011 Martin Willi
3 * Copyright (C) 2011 revosec AG
5 * Copyright (C) 2008-2018 Tobias Brunner
6 * Copyright (C) 2005 Jan Hutter
7 * HSR Hochschule fuer Technik Rapperswil
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
16 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
23 #include "ike_sa_manager.h"
26 #include <sa/ike_sa_id.h>
28 #include <threading/thread.h>
29 #include <threading/condvar.h>
30 #include <threading/mutex.h>
31 #include <threading/rwlock.h>
32 #include <collections/linked_list.h>
33 #include <crypto/hashers/hasher.h>
34 #include <processing/jobs/delete_ike_sa_job.h>
36 /* the default size of the hash table (MUST be a power of 2) */
37 #define DEFAULT_HASHTABLE_SIZE 1
39 /* the maximum size of the hash table (MUST be a power of 2) */
40 #define MAX_HASHTABLE_SIZE (1 << 30)
42 /* the default number of segments (MUST be a power of 2) */
43 #define DEFAULT_SEGMENT_COUNT 1
45 typedef struct entry_t entry_t
;
48 * An entry in the linked list, containing the IKE_SA, locking and lookup data.
53 * Number of threads waiting for this ike_sa_t object.
58 * Condvar where threads can wait until ike_sa_t object is free for use again.
63 * Thread by which this IKE_SA is currently checked out, if any
65 thread_t
*checked_out
;
68 * Does this SA drive out new threads?
70 bool driveout_new_threads
;
73 * Does this SA drive out waiting threads?
75 bool driveout_waiting_threads
;
78 * Identification of an IKE_SA (SPIs).
80 ike_sa_id_t
*ike_sa_id
;
83 * The contained ike_sa_t object.
88 * hash of the IKE_SA_INIT message, used to detect retransmissions
93 * remote host address, required for DoS detection and duplicate
94 * checking (host with same my_id and other_id is *not* considered
95 * a duplicate if the address family differs)
100 * As responder: Is this SA half-open?
105 * own identity, required for duplicate checking
107 identification_t
*my_id
;
110 * remote identity, required for duplicate checking
112 identification_t
*other_id
;
115 * message ID or hash of currently processing message, -1 if none
121 * Implementation of entry_t.destroy.
123 static status_t
entry_destroy(entry_t
*this)
125 /* also destroy IKE SA */
126 this->ike_sa
->destroy(this->ike_sa
);
127 this->ike_sa_id
->destroy(this->ike_sa_id
);
128 chunk_free(&this->init_hash
);
129 DESTROY_IF(this->other
);
130 DESTROY_IF(this->my_id
);
131 DESTROY_IF(this->other_id
);
132 this->condvar
->destroy(this->condvar
);
138 * Creates a new entry for the ike_sa_t list.
140 static entry_t
*entry_create()
145 .condvar
= condvar_create(CONDVAR_TYPE_DEFAULT
),
153 * Function that matches entry_t objects by ike_sa_id_t.
155 static bool entry_match_by_id(entry_t
*entry
, void *arg
)
157 ike_sa_id_t
*id
= arg
;
159 if (id
->equals(id
, entry
->ike_sa_id
))
163 if ((id
->get_responder_spi(id
) == 0 ||
164 entry
->ike_sa_id
->get_responder_spi(entry
->ike_sa_id
) == 0) &&
165 (id
->get_ike_version(id
) == IKEV1_MAJOR_VERSION
||
166 id
->is_initiator(id
) == entry
->ike_sa_id
->is_initiator(entry
->ike_sa_id
)) &&
167 id
->get_initiator_spi(id
) == entry
->ike_sa_id
->get_initiator_spi(entry
->ike_sa_id
))
169 /* this is TRUE for IKE_SAs that we initiated but have not yet received a response */
176 * Function that matches entry_t objects by ike_sa_t pointers.
178 static bool entry_match_by_sa(entry_t
*entry
, void *ike_sa
)
180 return entry
->ike_sa
== ike_sa
;
184 * Hash function for ike_sa_id_t objects.
186 static u_int
ike_sa_id_hash(ike_sa_id_t
*ike_sa_id
)
188 /* IKEv2 does not mandate random SPIs (RFC 5996, 2.6), they just have to be
189 * locally unique, so we use our randomly allocated SPI whether we are
190 * initiator or responder to ensure a good distribution. The latter is not
191 * possible for IKEv1 as we don't know whether we are original initiator or
192 * not (based on the IKE header). But as RFC 2408, section 2.5.3 proposes
193 * SPIs (Cookies) to be allocated near random (we allocate them randomly
194 * anyway) it seems safe to always use the initiator SPI. */
195 if (ike_sa_id
->get_ike_version(ike_sa_id
) == IKEV1_MAJOR_VERSION
||
196 ike_sa_id
->is_initiator(ike_sa_id
))
198 return ike_sa_id
->get_initiator_spi(ike_sa_id
);
200 return ike_sa_id
->get_responder_spi(ike_sa_id
);
203 typedef struct half_open_t half_open_t
;
206 * Struct to manage half-open IKE_SAs per peer.
209 /** chunk of remote host address */
212 /** the number of half-open IKE_SAs with that host */
215 /** the number of half-open IKE_SAs we responded to with that host */
216 u_int count_responder
;
220 * Destroys a half_open_t object.
222 static void half_open_destroy(half_open_t
*this)
224 chunk_free(&this->other
);
228 typedef struct connected_peers_t connected_peers_t
;
230 struct connected_peers_t
{
232 identification_t
*my_id
;
234 /** remote identity */
235 identification_t
*other_id
;
237 /** ip address family of peer */
240 /** list of ike_sa_id_t objects of IKE_SAs between the two identities */
244 static void connected_peers_destroy(connected_peers_t
*this)
246 this->my_id
->destroy(this->my_id
);
247 this->other_id
->destroy(this->other_id
);
248 this->sas
->destroy(this->sas
);
253 * Function that matches connected_peers_t objects by the given ids.
255 static inline bool connected_peers_match(connected_peers_t
*connected_peers
,
256 identification_t
*my_id
, identification_t
*other_id
,
259 return my_id
->equals(my_id
, connected_peers
->my_id
) &&
260 other_id
->equals(other_id
, connected_peers
->other_id
) &&
261 (!family
|| family
== connected_peers
->family
);
264 typedef struct init_hash_t init_hash_t
;
267 /** hash of IKE_SA_INIT or initial phase1 message (data is not cloned) */
270 /** our SPI allocated for the IKE_SA based on this message */
274 typedef struct segment_t segment_t
;
277 * Struct to manage segments of the hash table.
280 /** mutex to access a segment exclusively */
284 typedef struct shareable_segment_t shareable_segment_t
;
287 * Struct to manage segments of the "half-open" and "connected peers" hash tables.
289 struct shareable_segment_t
{
290 /** rwlock to access a segment non-/exclusively */
293 /** the number of entries in this segment - in case of the "half-open table"
294 * it's the sum of all half_open_t.count in a segment. */
298 typedef struct table_item_t table_item_t
;
301 * Instead of using linked_list_t for each bucket we store the data in our own
302 * list to save memory.
304 struct table_item_t
{
305 /** data of this item */
308 /** next item in the overflow list */
312 typedef struct private_ike_sa_manager_t private_ike_sa_manager_t
;
315 * Additional private members of ike_sa_manager_t.
317 struct private_ike_sa_manager_t
{
319 * Public interface of ike_sa_manager_t.
321 ike_sa_manager_t
public;
324 * Hash table with entries for the ike_sa_t objects.
326 table_item_t
**ike_sa_table
;
329 * The size of the hash table.
334 * Mask to map the hashes to table rows.
339 * Segments of the hash table.
344 * The number of segments.
349 * Mask to map a table row to a segment.
354 * Hash table with half_open_t objects.
356 table_item_t
**half_open_table
;
359 * Segments of the "half-open" hash table.
361 shareable_segment_t
*half_open_segments
;
364 * Total number of half-open IKE_SAs.
366 refcount_t half_open_count
;
369 * Total number of half-open IKE_SAs as responder.
371 refcount_t half_open_count_responder
;
374 * Total number of IKE_SAs registered with IKE_SA manager.
376 refcount_t total_sa_count
;
379 * Hash table with connected_peers_t objects.
381 table_item_t
**connected_peers_table
;
384 * Segments of the "connected peers" hash table.
386 shareable_segment_t
*connected_peers_segments
;
389 * Hash table with init_hash_t objects.
391 table_item_t
**init_hashes_table
;
394 * Segments of the "hashes" hash table.
396 segment_t
*init_hashes_segments
;
399 * RNG to get random SPIs for our side
404 * Registered callback for IKE SPIs
412 * Lock to access the RNG instance and the callback
417 * reuse existing IKE_SAs in checkout_by_config
422 * Configured IKE_SA limit, if any
428 * Acquire a lock to access the segment of the table row with the given index.
429 * It also works with the segment index directly.
431 static inline void lock_single_segment(private_ike_sa_manager_t
*this,
434 mutex_t
*lock
= this->segments
[index
& this->segment_mask
].mutex
;
439 * Release the lock required to access the segment of the table row with the given index.
440 * It also works with the segment index directly.
442 static inline void unlock_single_segment(private_ike_sa_manager_t
*this,
445 mutex_t
*lock
= this->segments
[index
& this->segment_mask
].mutex
;
452 static void lock_all_segments(private_ike_sa_manager_t
*this)
456 for (i
= 0; i
< this->segment_count
; i
++)
458 this->segments
[i
].mutex
->lock(this->segments
[i
].mutex
);
463 * Unlock all segments
465 static void unlock_all_segments(private_ike_sa_manager_t
*this)
469 for (i
= 0; i
< this->segment_count
; i
++)
471 this->segments
[i
].mutex
->unlock(this->segments
[i
].mutex
);
475 typedef struct private_enumerator_t private_enumerator_t
;
478 * hash table enumerator implementation
480 struct private_enumerator_t
{
483 * implements enumerator interface
485 enumerator_t enumerator
;
488 * associated ike_sa_manager_t
490 private_ike_sa_manager_t
*manager
;
493 * current segment index
498 * currently enumerating entry
503 * current table row index
510 table_item_t
*current
;
513 * previous table item
518 METHOD(enumerator_t
, enumerate
, bool,
519 private_enumerator_t
*this, va_list args
)
524 VA_ARGS_VGET(args
, entry
, segment
);
528 this->entry
->condvar
->signal(this->entry
->condvar
);
531 while (this->segment
< this->manager
->segment_count
)
533 while (this->row
< this->manager
->table_size
)
535 this->prev
= this->current
;
538 this->current
= this->current
->next
;
542 lock_single_segment(this->manager
, this->segment
);
543 this->current
= this->manager
->ike_sa_table
[this->row
];
547 *entry
= this->entry
= this->current
->value
;
548 *segment
= this->segment
;
551 unlock_single_segment(this->manager
, this->segment
);
552 this->row
+= this->manager
->segment_count
;
555 this->row
= this->segment
;
560 METHOD(enumerator_t
, enumerator_destroy
, void,
561 private_enumerator_t
*this)
565 this->entry
->condvar
->signal(this->entry
->condvar
);
569 unlock_single_segment(this->manager
, this->segment
);
575 * Creates an enumerator to enumerate the entries in the hash table.
577 static enumerator_t
* create_table_enumerator(private_ike_sa_manager_t
*this)
579 private_enumerator_t
*enumerator
;
583 .enumerate
= enumerator_enumerate_default
,
584 .venumerate
= _enumerate
,
585 .destroy
= _enumerator_destroy
,
589 return &enumerator
->enumerator
;
593 * Put an entry into the hash table.
594 * Note: The caller has to unlock the returned segment.
596 static u_int
put_entry(private_ike_sa_manager_t
*this, entry_t
*entry
)
598 table_item_t
*current
, *item
;
605 row
= ike_sa_id_hash(entry
->ike_sa_id
) & this->table_mask
;
606 segment
= row
& this->segment_mask
;
608 lock_single_segment(this, segment
);
609 current
= this->ike_sa_table
[row
];
611 { /* insert at the front of current bucket */
612 item
->next
= current
;
614 this->ike_sa_table
[row
] = item
;
615 ref_get(&this->total_sa_count
);
620 * Remove an entry from the hash table.
621 * Note: The caller MUST have a lock on the segment of this entry.
623 static void remove_entry(private_ike_sa_manager_t
*this, entry_t
*entry
)
625 table_item_t
*item
, *prev
= NULL
;
628 row
= ike_sa_id_hash(entry
->ike_sa_id
) & this->table_mask
;
629 item
= this->ike_sa_table
[row
];
632 if (item
->value
== entry
)
636 prev
->next
= item
->next
;
640 this->ike_sa_table
[row
] = item
->next
;
642 ignore_result(ref_put(&this->total_sa_count
));
652 * Remove the entry at the current enumerator position.
654 static void remove_entry_at(private_enumerator_t
*this)
659 table_item_t
*current
= this->current
;
661 ignore_result(ref_put(&this->manager
->total_sa_count
));
662 this->current
= this->prev
;
666 this->prev
->next
= current
->next
;
670 this->manager
->ike_sa_table
[this->row
] = current
->next
;
671 unlock_single_segment(this->manager
, this->segment
);
678 * Find an entry using the provided match function to compare the entries for
681 static status_t
get_entry_by_match_function(private_ike_sa_manager_t
*this,
682 ike_sa_id_t
*ike_sa_id
, entry_t
**entry
, u_int
*segment
,
683 bool (*match
)(entry_t
*,void*), void *param
)
688 row
= ike_sa_id_hash(ike_sa_id
) & this->table_mask
;
689 seg
= row
& this->segment_mask
;
691 lock_single_segment(this, seg
);
692 item
= this->ike_sa_table
[row
];
695 if (match(item
->value
, param
))
697 *entry
= item
->value
;
699 /* the locked segment has to be unlocked by the caller */
704 unlock_single_segment(this, seg
);
709 * Find an entry by ike_sa_id_t.
710 * Note: On SUCCESS, the caller has to unlock the segment.
712 static status_t
get_entry_by_id(private_ike_sa_manager_t
*this,
713 ike_sa_id_t
*ike_sa_id
, entry_t
**entry
, u_int
*segment
)
715 return get_entry_by_match_function(this, ike_sa_id
, entry
, segment
,
716 entry_match_by_id
, ike_sa_id
);
720 * Find an entry by IKE_SA pointer.
721 * Note: On SUCCESS, the caller has to unlock the segment.
723 static status_t
get_entry_by_sa(private_ike_sa_manager_t
*this,
724 ike_sa_id_t
*ike_sa_id
, ike_sa_t
*ike_sa
, entry_t
**entry
, u_int
*segment
)
726 return get_entry_by_match_function(this, ike_sa_id
, entry
, segment
,
727 entry_match_by_sa
, ike_sa
);
731 * Wait until no other thread is using an IKE_SA, return FALSE if entry not
734 static bool wait_for_entry(private_ike_sa_manager_t
*this, entry_t
*entry
,
737 if (entry
->driveout_new_threads
)
739 /* we are not allowed to get this */
742 while (entry
->checked_out
&& !entry
->driveout_waiting_threads
)
744 /* so wait until we can get it for us.
745 * we register us as waiting. */
746 entry
->waiting_threads
++;
747 entry
->condvar
->wait(entry
->condvar
, this->segments
[segment
].mutex
);
748 entry
->waiting_threads
--;
750 /* hm, a deletion request forbids us to get this SA, get next one */
751 if (entry
->driveout_waiting_threads
)
753 /* we must signal here, others may be waiting on it, too */
754 entry
->condvar
->signal(entry
->condvar
);
761 * Put a half-open SA into the hash table.
763 static void put_half_open(private_ike_sa_manager_t
*this, entry_t
*entry
)
769 half_open_t
*half_open
;
772 ike_id
= entry
->ike_sa_id
;
773 addr
= entry
->other
->get_address(entry
->other
);
774 row
= chunk_hash(addr
) & this->table_mask
;
775 segment
= row
& this->segment_mask
;
776 lock
= this->half_open_segments
[segment
].lock
;
777 lock
->write_lock(lock
);
778 item
= this->half_open_table
[row
];
781 half_open
= item
->value
;
783 if (chunk_equals(addr
, half_open
->other
))
793 .other
= chunk_clone(addr
),
797 .next
= this->half_open_table
[row
],
799 this->half_open_table
[row
] = item
;
802 ref_get(&this->half_open_count
);
803 if (!ike_id
->is_initiator(ike_id
))
805 half_open
->count_responder
++;
806 ref_get(&this->half_open_count_responder
);
808 this->half_open_segments
[segment
].count
++;
813 * Remove a half-open SA from the hash table.
815 static void remove_half_open(private_ike_sa_manager_t
*this, entry_t
*entry
)
817 table_item_t
*item
, *prev
= NULL
;
823 ike_id
= entry
->ike_sa_id
;
824 addr
= entry
->other
->get_address(entry
->other
);
825 row
= chunk_hash(addr
) & this->table_mask
;
826 segment
= row
& this->segment_mask
;
827 lock
= this->half_open_segments
[segment
].lock
;
828 lock
->write_lock(lock
);
829 item
= this->half_open_table
[row
];
832 half_open_t
*half_open
= item
->value
;
834 if (chunk_equals(addr
, half_open
->other
))
836 if (!ike_id
->is_initiator(ike_id
))
838 half_open
->count_responder
--;
839 ignore_result(ref_put(&this->half_open_count_responder
));
841 ignore_result(ref_put(&this->half_open_count
));
842 if (--half_open
->count
== 0)
846 prev
->next
= item
->next
;
850 this->half_open_table
[row
] = item
->next
;
852 half_open_destroy(half_open
);
855 this->half_open_segments
[segment
].count
--;
864 CALLBACK(id_matches
, bool,
865 ike_sa_id_t
*a
, va_list args
)
869 VA_ARGS_VGET(args
, b
);
870 return a
->equals(a
, b
);
874 * Put an SA between two peers into the hash table.
876 static void put_connected_peers(private_ike_sa_manager_t
*this, entry_t
*entry
)
881 connected_peers_t
*connected_peers
;
882 chunk_t my_id
, other_id
;
885 my_id
= entry
->my_id
->get_encoding(entry
->my_id
);
886 other_id
= entry
->other_id
->get_encoding(entry
->other_id
);
887 family
= entry
->other
->get_family(entry
->other
);
888 row
= chunk_hash_inc(other_id
, chunk_hash(my_id
)) & this->table_mask
;
889 segment
= row
& this->segment_mask
;
890 lock
= this->connected_peers_segments
[segment
].lock
;
891 lock
->write_lock(lock
);
892 item
= this->connected_peers_table
[row
];
895 connected_peers
= item
->value
;
897 if (connected_peers_match(connected_peers
, entry
->my_id
,
898 entry
->other_id
, family
))
900 if (connected_peers
->sas
->find_first(connected_peers
->sas
,
901 id_matches
, NULL
, entry
->ike_sa_id
))
913 INIT(connected_peers
,
914 .my_id
= entry
->my_id
->clone(entry
->my_id
),
915 .other_id
= entry
->other_id
->clone(entry
->other_id
),
917 .sas
= linked_list_create(),
920 .value
= connected_peers
,
921 .next
= this->connected_peers_table
[row
],
923 this->connected_peers_table
[row
] = item
;
925 connected_peers
->sas
->insert_last(connected_peers
->sas
,
926 entry
->ike_sa_id
->clone(entry
->ike_sa_id
));
927 this->connected_peers_segments
[segment
].count
++;
932 * Remove an SA between two peers from the hash table.
934 static void remove_connected_peers(private_ike_sa_manager_t
*this, entry_t
*entry
)
936 table_item_t
*item
, *prev
= NULL
;
939 chunk_t my_id
, other_id
;
942 my_id
= entry
->my_id
->get_encoding(entry
->my_id
);
943 other_id
= entry
->other_id
->get_encoding(entry
->other_id
);
944 family
= entry
->other
->get_family(entry
->other
);
946 row
= chunk_hash_inc(other_id
, chunk_hash(my_id
)) & this->table_mask
;
947 segment
= row
& this->segment_mask
;
949 lock
= this->connected_peers_segments
[segment
].lock
;
950 lock
->write_lock(lock
);
951 item
= this->connected_peers_table
[row
];
954 connected_peers_t
*current
= item
->value
;
956 if (connected_peers_match(current
, entry
->my_id
, entry
->other_id
,
959 enumerator_t
*enumerator
;
960 ike_sa_id_t
*ike_sa_id
;
962 enumerator
= current
->sas
->create_enumerator(current
->sas
);
963 while (enumerator
->enumerate(enumerator
, &ike_sa_id
))
965 if (ike_sa_id
->equals(ike_sa_id
, entry
->ike_sa_id
))
967 current
->sas
->remove_at(current
->sas
, enumerator
);
968 ike_sa_id
->destroy(ike_sa_id
);
969 this->connected_peers_segments
[segment
].count
--;
973 enumerator
->destroy(enumerator
);
974 if (current
->sas
->get_count(current
->sas
) == 0)
978 prev
->next
= item
->next
;
982 this->connected_peers_table
[row
] = item
->next
;
984 connected_peers_destroy(current
);
996 * Get a random SPI for new IKE_SAs
998 static uint64_t get_spi(private_ike_sa_manager_t
*this)
1002 this->spi_lock
->read_lock(this->spi_lock
);
1003 if (this->spi_cb
.cb
)
1005 spi
= this->spi_cb
.cb(this->spi_cb
.data
);
1007 else if (!this->rng
||
1008 !this->rng
->get_bytes(this->rng
, sizeof(spi
), (uint8_t*)&spi
))
1012 this->spi_lock
->unlock(this->spi_lock
);
1017 * Calculate the hash of the initial IKE message. Memory for the hash is
1018 * allocated on success.
1020 * @returns TRUE on success
1022 static bool get_init_hash(hasher_t
*hasher
, message_t
*message
, chunk_t
*hash
)
1026 if (message
->get_first_payload_type(message
) == PLV1_FRAGMENT
)
1027 { /* only hash the source IP, port and SPI for fragmented init messages */
1031 src
= message
->get_source(message
);
1032 if (!hasher
->allocate_hash(hasher
, src
->get_address(src
), NULL
))
1036 port
= src
->get_port(src
);
1037 if (!hasher
->allocate_hash(hasher
, chunk_from_thing(port
), NULL
))
1041 spi
= message
->get_initiator_spi(message
);
1042 return hasher
->allocate_hash(hasher
, chunk_from_thing(spi
), hash
);
1044 if (message
->get_exchange_type(message
) == ID_PROT
)
1045 { /* include the source for Main Mode as the hash will be the same if
1046 * SPIs are reused by two initiators that use the same proposal */
1047 src
= message
->get_source(message
);
1049 if (!hasher
->allocate_hash(hasher
, src
->get_address(src
), NULL
))
1054 return hasher
->allocate_hash(hasher
, message
->get_packet_data(message
), hash
);
1058 * Check if we already have created an IKE_SA based on the initial IKE message
1059 * with the given hash.
1060 * If not the hash is stored, the hash data is not(!) cloned.
1062 * Also, the local SPI is returned. In case of a retransmit this is already
1063 * stored together with the hash, otherwise it is newly allocated and should
1064 * be used to create the IKE_SA.
1066 * @returns ALREADY_DONE if the message with the given hash has been seen before
1067 * NOT_FOUND if the message hash was not found
1068 * FAILED if the SPI allocation failed
1070 static status_t
check_and_put_init_hash(private_ike_sa_manager_t
*this,
1071 chunk_t init_hash
, uint64_t *our_spi
)
1079 row
= chunk_hash(init_hash
) & this->table_mask
;
1080 segment
= row
& this->segment_mask
;
1081 mutex
= this->init_hashes_segments
[segment
].mutex
;
1083 item
= this->init_hashes_table
[row
];
1086 init_hash_t
*current
= item
->value
;
1088 if (chunk_equals(init_hash
, current
->hash
))
1090 *our_spi
= current
->our_spi
;
1091 mutex
->unlock(mutex
);
1092 return ALREADY_DONE
;
1097 spi
= get_spi(this);
1105 .len
= init_hash
.len
,
1106 .ptr
= init_hash
.ptr
,
1112 .next
= this->init_hashes_table
[row
],
1114 this->init_hashes_table
[row
] = item
;
1115 *our_spi
= init
->our_spi
;
1116 mutex
->unlock(mutex
);
1121 * Remove the hash of an initial IKE message from the cache.
1123 static void remove_init_hash(private_ike_sa_manager_t
*this, chunk_t init_hash
)
1125 table_item_t
*item
, *prev
= NULL
;
1129 row
= chunk_hash(init_hash
) & this->table_mask
;
1130 segment
= row
& this->segment_mask
;
1131 mutex
= this->init_hashes_segments
[segment
].mutex
;
1133 item
= this->init_hashes_table
[row
];
1136 init_hash_t
*current
= item
->value
;
1138 if (chunk_equals(init_hash
, current
->hash
))
1142 prev
->next
= item
->next
;
1146 this->init_hashes_table
[row
] = item
->next
;
1155 mutex
->unlock(mutex
);
1158 METHOD(ike_sa_manager_t
, checkout
, ike_sa_t
*,
1159 private_ike_sa_manager_t
*this, ike_sa_id_t
*ike_sa_id
)
1161 ike_sa_t
*ike_sa
= NULL
;
1165 DBG2(DBG_MGR
, "checkout %N SA with SPIs %.16"PRIx64
"_i %.16"PRIx64
"_r",
1166 ike_version_names
, ike_sa_id
->get_ike_version(ike_sa_id
),
1167 be64toh(ike_sa_id
->get_initiator_spi(ike_sa_id
)),
1168 be64toh(ike_sa_id
->get_responder_spi(ike_sa_id
)));
1170 if (get_entry_by_id(this, ike_sa_id
, &entry
, &segment
) == SUCCESS
)
1172 if (wait_for_entry(this, entry
, segment
))
1174 entry
->checked_out
= thread_current();
1175 ike_sa
= entry
->ike_sa
;
1176 DBG2(DBG_MGR
, "IKE_SA %s[%u] successfully checked out",
1177 ike_sa
->get_name(ike_sa
), ike_sa
->get_unique_id(ike_sa
));
1179 unlock_single_segment(this, segment
);
1181 charon
->bus
->set_sa(charon
->bus
, ike_sa
);
1185 DBG2(DBG_MGR
, "IKE_SA checkout not successful");
1190 METHOD(ike_sa_manager_t
, checkout_new
, ike_sa_t
*,
1191 private_ike_sa_manager_t
* this, ike_version_t version
, bool initiator
)
1193 ike_sa_id_t
*ike_sa_id
;
1195 uint8_t ike_version
;
1198 ike_version
= version
== IKEV1
? IKEV1_MAJOR_VERSION
: IKEV2_MAJOR_VERSION
;
1200 spi
= get_spi(this);
1203 DBG1(DBG_MGR
, "failed to allocate SPI for new IKE_SA");
1209 ike_sa_id
= ike_sa_id_create(ike_version
, spi
, 0, TRUE
);
1213 ike_sa_id
= ike_sa_id_create(ike_version
, 0, spi
, FALSE
);
1215 ike_sa
= ike_sa_create(ike_sa_id
, initiator
, version
);
1216 ike_sa_id
->destroy(ike_sa_id
);
1220 DBG2(DBG_MGR
, "created IKE_SA %s[%u]", ike_sa
->get_name(ike_sa
),
1221 ike_sa
->get_unique_id(ike_sa
));
1227 * Get the message ID or message hash to detect early retransmissions
1229 static uint32_t get_message_id_or_hash(message_t
*message
)
1231 if (message
->get_major_version(message
) == IKEV1_MAJOR_VERSION
)
1233 /* Use a hash for IKEv1 Phase 1, where we don't have a MID, and Quick
1234 * Mode, where all three messages use the same message ID */
1235 if (message
->get_message_id(message
) == 0 ||
1236 message
->get_exchange_type(message
) == QUICK_MODE
)
1238 return chunk_hash(message
->get_packet_data(message
));
1241 return message
->get_message_id(message
);
1244 METHOD(ike_sa_manager_t
, checkout_by_message
, ike_sa_t
*,
1245 private_ike_sa_manager_t
* this, message_t
*message
)
1249 ike_sa_t
*ike_sa
= NULL
;
1251 ike_version_t ike_version
;
1252 bool is_init
= FALSE
;
1254 id
= message
->get_ike_sa_id(message
);
1255 /* clone the IKE_SA ID so we can modify the initiator flag */
1257 id
->switch_initiator(id
);
1259 DBG2(DBG_MGR
, "checkout %N SA by message with SPIs %.16"PRIx64
"_i "
1260 "%.16"PRIx64
"_r", ike_version_names
, id
->get_ike_version(id
),
1261 be64toh(id
->get_initiator_spi(id
)),
1262 be64toh(id
->get_responder_spi(id
)));
1264 if (id
->get_responder_spi(id
) == 0 &&
1265 message
->get_message_id(message
) == 0)
1267 if (message
->get_major_version(message
) == IKEV2_MAJOR_VERSION
)
1269 if (message
->get_exchange_type(message
) == IKE_SA_INIT
&&
1270 message
->get_request(message
))
1272 ike_version
= IKEV2
;
1278 if (message
->get_exchange_type(message
) == ID_PROT
||
1279 message
->get_exchange_type(message
) == AGGRESSIVE
)
1281 ike_version
= IKEV1
;
1283 if (id
->is_initiator(id
))
1284 { /* not set in IKEv1, switch back before applying to new SA */
1285 id
->switch_initiator(id
);
1297 hasher
= lib
->crypto
->create_hasher(lib
->crypto
, HASH_SHA1
);
1298 if (!hasher
|| !get_init_hash(hasher
, message
, &hash
))
1300 DBG1(DBG_MGR
, "ignoring message, failed to hash message");
1305 hasher
->destroy(hasher
);
1307 /* ensure this is not a retransmit of an already handled init message */
1308 switch (check_and_put_init_hash(this, hash
, &our_spi
))
1311 { /* we've not seen this packet yet, create a new IKE_SA */
1312 if (!this->ikesa_limit
||
1313 this->public.get_count(&this->public) < this->ikesa_limit
)
1315 id
->set_responder_spi(id
, our_spi
);
1316 ike_sa
= ike_sa_create(id
, FALSE
, ike_version
);
1319 entry
= entry_create();
1320 entry
->ike_sa
= ike_sa
;
1321 entry
->ike_sa_id
= id
;
1322 entry
->processing
= get_message_id_or_hash(message
);
1323 entry
->init_hash
= hash
;
1325 segment
= put_entry(this, entry
);
1326 entry
->checked_out
= thread_current();
1327 unlock_single_segment(this, segment
);
1329 DBG2(DBG_MGR
, "created IKE_SA %s[%u]",
1330 ike_sa
->get_name(ike_sa
),
1331 ike_sa
->get_unique_id(ike_sa
));
1336 DBG1(DBG_MGR
, "creating IKE_SA failed, ignoring message");
1341 DBG1(DBG_MGR
, "ignoring %N, hitting IKE_SA limit (%u)",
1342 exchange_type_names
, message
->get_exchange_type(message
),
1345 remove_init_hash(this, hash
);
1351 { /* we failed to allocate an SPI */
1354 DBG1(DBG_MGR
, "ignoring message, failed to allocate SPI");
1361 /* it looks like we already handled this init message to some degree */
1362 id
->set_responder_spi(id
, our_spi
);
1366 if (get_entry_by_id(this, id
, &entry
, &segment
) == SUCCESS
)
1368 /* only check out if we are not already processing it. */
1369 if (entry
->processing
== get_message_id_or_hash(message
))
1371 DBG1(DBG_MGR
, "ignoring request with ID %u, already processing",
1374 else if (wait_for_entry(this, entry
, segment
))
1376 ike_sa_id_t
*ike_id
;
1378 ike_id
= entry
->ike_sa
->get_id(entry
->ike_sa
);
1379 entry
->checked_out
= thread_current();
1380 if (message
->get_first_payload_type(message
) != PLV1_FRAGMENT
&&
1381 message
->get_first_payload_type(message
) != PLV2_FRAGMENT
)
1382 { /* TODO-FRAG: this fails if there are unencrypted payloads */
1383 entry
->processing
= get_message_id_or_hash(message
);
1385 if (ike_id
->get_responder_spi(ike_id
) == 0)
1387 ike_id
->set_responder_spi(ike_id
, id
->get_responder_spi(id
));
1389 ike_sa
= entry
->ike_sa
;
1390 DBG2(DBG_MGR
, "IKE_SA %s[%u] successfully checked out",
1391 ike_sa
->get_name(ike_sa
), ike_sa
->get_unique_id(ike_sa
));
1393 unlock_single_segment(this, segment
);
1397 charon
->bus
->alert(charon
->bus
, ALERT_INVALID_IKE_SPI
, message
);
1402 charon
->bus
->set_sa(charon
->bus
, ike_sa
);
1405 DBG2(DBG_MGR
, "IKE_SA checkout not successful");
1410 METHOD(ike_sa_manager_t
, checkout_by_config
, ike_sa_t
*,
1411 private_ike_sa_manager_t
*this, peer_cfg_t
*peer_cfg
)
1413 enumerator_t
*enumerator
;
1415 ike_sa_t
*ike_sa
= NULL
;
1416 peer_cfg_t
*current_peer
;
1417 ike_cfg_t
*current_ike
;
1420 DBG2(DBG_MGR
, "checkout IKE_SA by config");
1422 if (!this->reuse_ikesa
&& peer_cfg
->get_ike_version(peer_cfg
) != IKEV1
)
1423 { /* IKE_SA reuse disabled by config (not possible for IKEv1) */
1424 ike_sa
= checkout_new(this, peer_cfg
->get_ike_version(peer_cfg
), TRUE
);
1425 charon
->bus
->set_sa(charon
->bus
, ike_sa
);
1429 enumerator
= create_table_enumerator(this);
1430 while (enumerator
->enumerate(enumerator
, &entry
, &segment
))
1432 if (!wait_for_entry(this, entry
, segment
))
1436 if (entry
->ike_sa
->get_state(entry
->ike_sa
) == IKE_DELETING
||
1437 entry
->ike_sa
->get_state(entry
->ike_sa
) == IKE_REKEYED
)
1438 { /* skip IKE_SAs which are not usable, wake other waiting threads */
1439 entry
->condvar
->signal(entry
->condvar
);
1443 current_peer
= entry
->ike_sa
->get_peer_cfg(entry
->ike_sa
);
1444 if (current_peer
&& current_peer
->equals(current_peer
, peer_cfg
))
1446 current_ike
= current_peer
->get_ike_cfg(current_peer
);
1447 if (current_ike
->equals(current_ike
, peer_cfg
->get_ike_cfg(peer_cfg
)))
1449 entry
->checked_out
= thread_current();
1450 ike_sa
= entry
->ike_sa
;
1451 DBG2(DBG_MGR
, "found existing IKE_SA %u with a '%s' config",
1452 ike_sa
->get_unique_id(ike_sa
),
1453 current_peer
->get_name(current_peer
));
1457 /* other threads might be waiting for this entry */
1458 entry
->condvar
->signal(entry
->condvar
);
1460 enumerator
->destroy(enumerator
);
1463 { /* no IKE_SA using such a config, hand out a new */
1464 ike_sa
= checkout_new(this, peer_cfg
->get_ike_version(peer_cfg
), TRUE
);
1466 charon
->bus
->set_sa(charon
->bus
, ike_sa
);
1471 DBG2(DBG_MGR
, "IKE_SA checkout not successful");
1476 METHOD(ike_sa_manager_t
, checkout_by_id
, ike_sa_t
*,
1477 private_ike_sa_manager_t
*this, uint32_t id
)
1479 enumerator_t
*enumerator
;
1481 ike_sa_t
*ike_sa
= NULL
;
1484 DBG2(DBG_MGR
, "checkout IKE_SA by unique ID %u", id
);
1486 enumerator
= create_table_enumerator(this);
1487 while (enumerator
->enumerate(enumerator
, &entry
, &segment
))
1489 if (wait_for_entry(this, entry
, segment
))
1491 if (entry
->ike_sa
->get_unique_id(entry
->ike_sa
) == id
)
1493 ike_sa
= entry
->ike_sa
;
1494 entry
->checked_out
= thread_current();
1497 /* other threads might be waiting for this entry */
1498 entry
->condvar
->signal(entry
->condvar
);
1501 enumerator
->destroy(enumerator
);
1505 DBG2(DBG_MGR
, "IKE_SA %s[%u] successfully checked out",
1506 ike_sa
->get_name(ike_sa
), ike_sa
->get_unique_id(ike_sa
));
1510 DBG2(DBG_MGR
, "IKE_SA checkout not successful");
1512 charon
->bus
->set_sa(charon
->bus
, ike_sa
);
1516 METHOD(ike_sa_manager_t
, checkout_by_name
, ike_sa_t
*,
1517 private_ike_sa_manager_t
*this, char *name
, bool child
)
1519 enumerator_t
*enumerator
, *children
;
1521 ike_sa_t
*ike_sa
= NULL
;
1522 child_sa_t
*child_sa
;
1525 DBG2(DBG_MGR
, "checkout IKE_SA by%s name '%s'", child
? " child" : "", name
);
1527 enumerator
= create_table_enumerator(this);
1528 while (enumerator
->enumerate(enumerator
, &entry
, &segment
))
1530 if (wait_for_entry(this, entry
, segment
))
1532 /* look for a child with such a policy name ... */
1535 children
= entry
->ike_sa
->create_child_sa_enumerator(entry
->ike_sa
);
1536 while (children
->enumerate(children
, (void**)&child_sa
))
1538 if (streq(child_sa
->get_name(child_sa
), name
))
1540 ike_sa
= entry
->ike_sa
;
1544 children
->destroy(children
);
1546 else /* ... or for a IKE_SA with such a connection name */
1548 if (streq(entry
->ike_sa
->get_name(entry
->ike_sa
), name
))
1550 ike_sa
= entry
->ike_sa
;
1553 /* got one, return */
1556 entry
->checked_out
= thread_current();
1557 DBG2(DBG_MGR
, "IKE_SA %s[%u] successfully checked out",
1558 ike_sa
->get_name(ike_sa
), ike_sa
->get_unique_id(ike_sa
));
1561 /* other threads might be waiting for this entry */
1562 entry
->condvar
->signal(entry
->condvar
);
1565 enumerator
->destroy(enumerator
);
1567 charon
->bus
->set_sa(charon
->bus
, ike_sa
);
1571 DBG2(DBG_MGR
, "IKE_SA checkout not successful");
1576 METHOD(ike_sa_manager_t
, new_initiator_spi
, bool,
1577 private_ike_sa_manager_t
*this, ike_sa_t
*ike_sa
)
1579 ike_sa_state_t state
;
1580 ike_sa_id_t
*ike_sa_id
;
1583 uint64_t new_spi
, spi
;
1585 state
= ike_sa
->get_state(ike_sa
);
1586 if (state
!= IKE_CONNECTING
)
1588 DBG1(DBG_MGR
, "unable to change initiator SPI for IKE_SA in state "
1589 "%N", ike_sa_state_names
, state
);
1593 ike_sa_id
= ike_sa
->get_id(ike_sa
);
1594 if (!ike_sa_id
->is_initiator(ike_sa_id
))
1596 DBG1(DBG_MGR
, "unable to change initiator SPI of IKE_SA as responder");
1600 if (ike_sa
!= charon
->bus
->get_sa(charon
->bus
))
1602 DBG1(DBG_MGR
, "unable to change initiator SPI of IKE_SA not checked "
1603 "out by current thread");
1607 new_spi
= get_spi(this);
1610 DBG1(DBG_MGR
, "unable to allocate new initiator SPI for IKE_SA");
1614 if (get_entry_by_sa(this, ike_sa_id
, ike_sa
, &entry
, &segment
) == SUCCESS
)
1616 if (entry
->driveout_waiting_threads
&& entry
->driveout_new_threads
)
1617 { /* it looks like flush() has been called and the SA is being deleted
1618 * anyway, no need for a new SPI */
1619 DBG2(DBG_MGR
, "ignored change of initiator SPI during shutdown");
1620 unlock_single_segment(this, segment
);
1626 DBG1(DBG_MGR
, "unable to change initiator SPI of IKE_SA, not found");
1630 /* the hashtable row and segment are determined by the local SPI as
1631 * initiator, so if we change it the row and segment derived from it might
1632 * change as well. This could be a problem for threads waiting for the
1633 * entry (in particular those enumerating entries to check them out by
1634 * unique ID or name). In order to avoid having to drive them out and thus
1635 * preventing them from checking out the entry (even though the ID or name
1636 * will not change and enumerating it is also fine), we mask the new SPI and
1637 * merge it with the old SPI so the entry ends up in the same row/segment.
1638 * Since SPIs are 64-bit and the number of rows/segments is usually
1639 * relatively low this should not be a problem. */
1640 spi
= ike_sa_id
->get_initiator_spi(ike_sa_id
);
1641 new_spi
= (spi
& (uint64_t)this->table_mask
) |
1642 (new_spi
& ~(uint64_t)this->table_mask
);
1644 DBG2(DBG_MGR
, "change initiator SPI of IKE_SA %s[%u] from %.16"PRIx64
" to "
1645 "%.16"PRIx64
, ike_sa
->get_name(ike_sa
), ike_sa
->get_unique_id(ike_sa
),
1646 be64toh(spi
), be64toh(new_spi
));
1648 ike_sa_id
->set_initiator_spi(ike_sa_id
, new_spi
);
1649 entry
->ike_sa_id
->replace_values(entry
->ike_sa_id
, ike_sa_id
);
1651 entry
->condvar
->signal(entry
->condvar
);
1652 unlock_single_segment(this, segment
);
1656 CALLBACK(enumerator_filter_wait
, bool,
1657 private_ike_sa_manager_t
*this, enumerator_t
*orig
, va_list args
)
1663 VA_ARGS_VGET(args
, out
);
1665 while (orig
->enumerate(orig
, &entry
, &segment
))
1667 if (wait_for_entry(this, entry
, segment
))
1669 *out
= entry
->ike_sa
;
1670 charon
->bus
->set_sa(charon
->bus
, *out
);
1677 CALLBACK(enumerator_filter_skip
, bool,
1678 private_ike_sa_manager_t
*this, enumerator_t
*orig
, va_list args
)
1684 VA_ARGS_VGET(args
, out
);
1686 while (orig
->enumerate(orig
, &entry
, &segment
))
1688 if (!entry
->driveout_new_threads
&&
1689 !entry
->driveout_waiting_threads
&&
1690 !entry
->checked_out
)
1692 *out
= entry
->ike_sa
;
1693 charon
->bus
->set_sa(charon
->bus
, *out
);
1700 CALLBACK(reset_sa
, void,
1703 charon
->bus
->set_sa(charon
->bus
, NULL
);
1706 METHOD(ike_sa_manager_t
, create_enumerator
, enumerator_t
*,
1707 private_ike_sa_manager_t
* this, bool wait
)
1709 return enumerator_create_filter(create_table_enumerator(this),
1710 wait
? (void*)enumerator_filter_wait
: (void*)enumerator_filter_skip
,
1714 METHOD(ike_sa_manager_t
, checkin
, void,
1715 private_ike_sa_manager_t
*this, ike_sa_t
*ike_sa
)
1717 /* to check the SA back in, we look for the pointer of the ike_sa
1719 * The lookup is done by initiator SPI, so even if the SPI has changed (e.g.
1720 * on reception of a IKE_SA_INIT response) the lookup will work but
1721 * updating of the SPI MAY be necessary...
1724 ike_sa_id_t
*ike_sa_id
;
1726 identification_t
*my_id
, *other_id
;
1729 ike_sa_id
= ike_sa
->get_id(ike_sa
);
1730 my_id
= ike_sa
->get_my_id(ike_sa
);
1731 other_id
= ike_sa
->get_other_eap_id(ike_sa
);
1732 other
= ike_sa
->get_other_host(ike_sa
);
1734 DBG2(DBG_MGR
, "checkin IKE_SA %s[%u]", ike_sa
->get_name(ike_sa
),
1735 ike_sa
->get_unique_id(ike_sa
));
1737 /* look for the entry */
1738 if (get_entry_by_sa(this, ike_sa_id
, ike_sa
, &entry
, &segment
) == SUCCESS
)
1740 /* ike_sa_id must be updated */
1741 entry
->ike_sa_id
->replace_values(entry
->ike_sa_id
, ike_sa
->get_id(ike_sa
));
1742 /* signal waiting threads */
1743 entry
->checked_out
= NULL
;
1744 entry
->processing
= -1;
1745 /* check if this SA is half-open */
1746 if (entry
->half_open
&& ike_sa
->get_state(ike_sa
) != IKE_CONNECTING
)
1748 /* not half open anymore */
1749 entry
->half_open
= FALSE
;
1750 remove_half_open(this, entry
);
1752 else if (entry
->half_open
&& !other
->ip_equals(other
, entry
->other
))
1754 /* the other host's IP has changed, we must update the hash table */
1755 remove_half_open(this, entry
);
1756 DESTROY_IF(entry
->other
);
1757 entry
->other
= other
->clone(other
);
1758 put_half_open(this, entry
);
1760 else if (!entry
->half_open
&&
1761 ike_sa
->get_state(ike_sa
) == IKE_CONNECTING
)
1763 /* this is a new half-open SA */
1764 entry
->half_open
= TRUE
;
1765 entry
->other
= other
->clone(other
);
1766 put_half_open(this, entry
);
1768 entry
->condvar
->signal(entry
->condvar
);
1772 entry
= entry_create();
1773 entry
->ike_sa_id
= ike_sa_id
->clone(ike_sa_id
);
1774 entry
->ike_sa
= ike_sa
;
1775 if (ike_sa
->get_state(ike_sa
) == IKE_CONNECTING
)
1777 entry
->half_open
= TRUE
;
1778 entry
->other
= other
->clone(other
);
1779 put_half_open(this, entry
);
1781 segment
= put_entry(this, entry
);
1783 DBG2(DBG_MGR
, "checkin of IKE_SA successful");
1785 /* apply identities for duplicate test */
1786 if ((ike_sa
->get_state(ike_sa
) == IKE_ESTABLISHED
||
1787 ike_sa
->get_state(ike_sa
) == IKE_PASSIVE
) &&
1788 entry
->my_id
== NULL
&& entry
->other_id
== NULL
)
1790 if (ike_sa
->get_version(ike_sa
) == IKEV1
)
1792 /* If authenticated and received INITIAL_CONTACT,
1793 * delete any existing IKE_SAs with that peer. */
1794 if (ike_sa
->has_condition(ike_sa
, COND_INIT_CONTACT_SEEN
))
1796 /* We can't hold the segment locked while checking the
1797 * uniqueness as this could lead to deadlocks. We mark the
1798 * entry as checked out while we release the lock so no other
1799 * thread can acquire it. Since it is not yet in the list of
1800 * connected peers that will not cause a deadlock as no other
1801 * caller of check_unqiueness() will try to check out this SA */
1802 entry
->checked_out
= thread_current();
1803 unlock_single_segment(this, segment
);
1805 this->public.check_uniqueness(&this->public, ike_sa
, TRUE
);
1806 ike_sa
->set_condition(ike_sa
, COND_INIT_CONTACT_SEEN
, FALSE
);
1808 /* The entry could have been modified in the mean time, e.g.
1809 * because another SA was added/removed next to it or another
1810 * thread is waiting, but it should still exist, so there is no
1811 * need for a lookup via get_entry_by... */
1812 lock_single_segment(this, segment
);
1813 entry
->checked_out
= NULL
;
1814 /* We already signaled waiting threads above, we have to do that
1815 * again after checking the SA out and back in again. */
1816 entry
->condvar
->signal(entry
->condvar
);
1820 entry
->my_id
= my_id
->clone(my_id
);
1821 entry
->other_id
= other_id
->clone(other_id
);
1824 entry
->other
= other
->clone(other
);
1826 put_connected_peers(this, entry
);
1829 unlock_single_segment(this, segment
);
1831 charon
->bus
->set_sa(charon
->bus
, NULL
);
1834 METHOD(ike_sa_manager_t
, checkin_and_destroy
, void,
1835 private_ike_sa_manager_t
*this, ike_sa_t
*ike_sa
)
1837 /* deletion is a bit complex, we must ensure that no thread is waiting for
1839 * We take this SA from the table, and start signaling while threads
1840 * are in the condvar.
1843 ike_sa_id_t
*ike_sa_id
;
1846 ike_sa_id
= ike_sa
->get_id(ike_sa
);
1848 DBG2(DBG_MGR
, "checkin and destroy IKE_SA %s[%u]", ike_sa
->get_name(ike_sa
),
1849 ike_sa
->get_unique_id(ike_sa
));
1851 if (get_entry_by_sa(this, ike_sa_id
, ike_sa
, &entry
, &segment
) == SUCCESS
)
1853 if (entry
->driveout_waiting_threads
&& entry
->driveout_new_threads
)
1854 { /* it looks like flush() has been called and the SA is being deleted
1855 * anyway, just check it in */
1856 DBG2(DBG_MGR
, "ignored checkin and destroy of IKE_SA during shutdown");
1857 entry
->checked_out
= NULL
;
1858 entry
->condvar
->broadcast(entry
->condvar
);
1859 unlock_single_segment(this, segment
);
1863 /* drive out waiting threads, as we are in hurry */
1864 entry
->driveout_waiting_threads
= TRUE
;
1865 /* mark it, so no new threads can get this entry */
1866 entry
->driveout_new_threads
= TRUE
;
1867 /* wait until all workers have done their work */
1868 while (entry
->waiting_threads
)
1871 entry
->condvar
->broadcast(entry
->condvar
);
1872 /* they will wake us again when their work is done */
1873 entry
->condvar
->wait(entry
->condvar
, this->segments
[segment
].mutex
);
1875 remove_entry(this, entry
);
1876 unlock_single_segment(this, segment
);
1878 if (entry
->half_open
)
1880 remove_half_open(this, entry
);
1882 if (entry
->my_id
&& entry
->other_id
)
1884 remove_connected_peers(this, entry
);
1886 if (entry
->init_hash
.ptr
)
1888 remove_init_hash(this, entry
->init_hash
);
1891 entry_destroy(entry
);
1893 DBG2(DBG_MGR
, "checkin and destroy of IKE_SA successful");
1897 DBG1(DBG_MGR
, "tried to checkin and delete nonexisting IKE_SA");
1898 ike_sa
->destroy(ike_sa
);
1900 charon
->bus
->set_sa(charon
->bus
, NULL
);
1904 * Cleanup function for create_id_enumerator
1906 static void id_enumerator_cleanup(linked_list_t
*ids
)
1908 ids
->destroy_offset(ids
, offsetof(ike_sa_id_t
, destroy
));
1911 METHOD(ike_sa_manager_t
, create_id_enumerator
, enumerator_t
*,
1912 private_ike_sa_manager_t
*this, identification_t
*me
,
1913 identification_t
*other
, int family
)
1918 linked_list_t
*ids
= NULL
;
1920 row
= chunk_hash_inc(other
->get_encoding(other
),
1921 chunk_hash(me
->get_encoding(me
))) & this->table_mask
;
1922 segment
= row
& this->segment_mask
;
1924 lock
= this->connected_peers_segments
[segment
].lock
;
1925 lock
->read_lock(lock
);
1926 item
= this->connected_peers_table
[row
];
1929 connected_peers_t
*current
= item
->value
;
1931 if (connected_peers_match(current
, me
, other
, family
))
1933 ids
= current
->sas
->clone_offset(current
->sas
,
1934 offsetof(ike_sa_id_t
, clone
));
1943 return enumerator_create_empty();
1945 return enumerator_create_cleaner(ids
->create_enumerator(ids
),
1946 (void*)id_enumerator_cleanup
, ids
);
1950 * Move all CHILD_SAs and virtual IPs from old to new
1952 static void adopt_children_and_vips(ike_sa_t
*old
, ike_sa_t
*new)
1954 enumerator_t
*enumerator
;
1955 child_sa_t
*child_sa
;
1957 int chcount
= 0, vipcount
= 0;
1959 charon
->bus
->children_migrate(charon
->bus
, new->get_id(new),
1960 new->get_unique_id(new));
1961 enumerator
= old
->create_child_sa_enumerator(old
);
1962 while (enumerator
->enumerate(enumerator
, &child_sa
))
1964 old
->remove_child_sa(old
, enumerator
);
1965 new->add_child_sa(new, child_sa
);
1968 enumerator
->destroy(enumerator
);
1970 new->adopt_child_tasks(new, old
);
1972 enumerator
= old
->create_virtual_ip_enumerator(old
, FALSE
);
1973 while (enumerator
->enumerate(enumerator
, &vip
))
1975 new->add_virtual_ip(new, FALSE
, vip
);
1978 enumerator
->destroy(enumerator
);
1979 /* this does not release the addresses, which is good, but it does trigger
1980 * an assign_vips(FALSE) event... */
1981 old
->clear_virtual_ips(old
, FALSE
);
1982 /* ...trigger the analogous event on the new SA */
1983 charon
->bus
->set_sa(charon
->bus
, new);
1984 charon
->bus
->assign_vips(charon
->bus
, new, TRUE
);
1985 charon
->bus
->children_migrate(charon
->bus
, NULL
, 0);
1986 charon
->bus
->set_sa(charon
->bus
, old
);
1988 if (chcount
|| vipcount
)
1990 DBG1(DBG_IKE
, "detected reauth of existing IKE_SA, adopting %d "
1991 "children and %d virtual IPs", chcount
, vipcount
);
1996 * Delete an existing IKE_SA due to a unique replace policy
1998 static status_t
enforce_replace(private_ike_sa_manager_t
*this,
1999 ike_sa_t
*duplicate
, ike_sa_t
*new,
2000 identification_t
*other
, host_t
*host
)
2002 charon
->bus
->alert(charon
->bus
, ALERT_UNIQUE_REPLACE
);
2004 if (host
->equals(host
, duplicate
->get_other_host(duplicate
)))
2006 /* looks like a reauthentication attempt */
2007 if (!new->has_condition(new, COND_INIT_CONTACT_SEEN
) &&
2008 new->get_version(new) == IKEV1
)
2010 /* IKEv1 implicitly takes over children, IKEv2 recreates them
2012 adopt_children_and_vips(duplicate
, new);
2014 /* For IKEv1 we have to delay the delete for the old IKE_SA. Some
2015 * peers need to complete the new SA first, otherwise the quick modes
2016 * might get lost. For IKEv2 we do the same, as we want overlapping
2017 * CHILD_SAs to keep connectivity up. */
2018 lib
->scheduler
->schedule_job(lib
->scheduler
, (job_t
*)
2019 delete_ike_sa_job_create(duplicate
->get_id(duplicate
), TRUE
), 10);
2020 DBG1(DBG_IKE
, "schedule delete of duplicate IKE_SA for peer '%Y' due "
2021 "to uniqueness policy and suspected reauthentication", other
);
2024 DBG1(DBG_IKE
, "deleting duplicate IKE_SA for peer '%Y' due to "
2025 "uniqueness policy", other
);
2026 return duplicate
->delete(duplicate
, FALSE
);
2029 METHOD(ike_sa_manager_t
, check_uniqueness
, bool,
2030 private_ike_sa_manager_t
*this, ike_sa_t
*ike_sa
, bool force_replace
)
2032 bool cancel
= FALSE
;
2033 peer_cfg_t
*peer_cfg
;
2034 unique_policy_t policy
;
2035 enumerator_t
*enumerator
;
2036 ike_sa_id_t
*id
= NULL
;
2037 identification_t
*me
, *other
;
2040 peer_cfg
= ike_sa
->get_peer_cfg(ike_sa
);
2041 policy
= peer_cfg
->get_unique_policy(peer_cfg
);
2042 if (policy
== UNIQUE_NEVER
|| (policy
== UNIQUE_NO
&& !force_replace
))
2046 me
= ike_sa
->get_my_id(ike_sa
);
2047 other
= ike_sa
->get_other_eap_id(ike_sa
);
2048 other_host
= ike_sa
->get_other_host(ike_sa
);
2050 enumerator
= create_id_enumerator(this, me
, other
,
2051 other_host
->get_family(other_host
));
2052 while (enumerator
->enumerate(enumerator
, &id
))
2054 status_t status
= SUCCESS
;
2055 ike_sa_t
*duplicate
;
2057 duplicate
= checkout(this, id
);
2064 DBG1(DBG_IKE
, "destroying duplicate IKE_SA for peer '%Y', "
2065 "received INITIAL_CONTACT", other
);
2066 charon
->bus
->ike_updown(charon
->bus
, duplicate
, FALSE
);
2067 checkin_and_destroy(this, duplicate
);
2070 peer_cfg
= duplicate
->get_peer_cfg(duplicate
);
2071 if (peer_cfg
&& peer_cfg
->equals(peer_cfg
, ike_sa
->get_peer_cfg(ike_sa
)))
2073 switch (duplicate
->get_state(duplicate
))
2075 case IKE_ESTABLISHED
:
2079 case UNIQUE_REPLACE
:
2080 status
= enforce_replace(this, duplicate
, ike_sa
,
2084 /* potential reauthentication? */
2085 if (!other_host
->equals(other_host
,
2086 duplicate
->get_other_host(duplicate
)))
2089 /* we keep the first IKE_SA and delete all
2090 * other duplicates that might exist */
2091 policy
= UNIQUE_REPLACE
;
2102 if (status
== DESTROY_ME
)
2104 checkin_and_destroy(this, duplicate
);
2108 checkin(this, duplicate
);
2111 enumerator
->destroy(enumerator
);
2112 /* reset thread's current IKE_SA after checkin */
2113 charon
->bus
->set_sa(charon
->bus
, ike_sa
);
2117 METHOD(ike_sa_manager_t
, has_contact
, bool,
2118 private_ike_sa_manager_t
*this, identification_t
*me
,
2119 identification_t
*other
, int family
)
2126 row
= chunk_hash_inc(other
->get_encoding(other
),
2127 chunk_hash(me
->get_encoding(me
))) & this->table_mask
;
2128 segment
= row
& this->segment_mask
;
2129 lock
= this->connected_peers_segments
[segment
].lock
;
2130 lock
->read_lock(lock
);
2131 item
= this->connected_peers_table
[row
];
2134 if (connected_peers_match(item
->value
, me
, other
, family
))
2146 METHOD(ike_sa_manager_t
, get_count
, u_int
,
2147 private_ike_sa_manager_t
*this)
2149 return (u_int
)ref_cur(&this->total_sa_count
);
2152 METHOD(ike_sa_manager_t
, get_half_open_count
, u_int
,
2153 private_ike_sa_manager_t
*this, host_t
*ip
, bool responder_only
)
2163 addr
= ip
->get_address(ip
);
2164 row
= chunk_hash(addr
) & this->table_mask
;
2165 segment
= row
& this->segment_mask
;
2166 lock
= this->half_open_segments
[segment
].lock
;
2167 lock
->read_lock(lock
);
2168 item
= this->half_open_table
[row
];
2171 half_open_t
*half_open
= item
->value
;
2173 if (chunk_equals(addr
, half_open
->other
))
2175 count
= responder_only
? half_open
->count_responder
2185 count
= responder_only
? (u_int
)ref_cur(&this->half_open_count_responder
)
2186 : (u_int
)ref_cur(&this->half_open_count
);
2191 METHOD(ike_sa_manager_t
, set_spi_cb
, void,
2192 private_ike_sa_manager_t
*this, spi_cb_t callback
, void *data
)
2194 this->spi_lock
->write_lock(this->spi_lock
);
2195 this->spi_cb
.cb
= callback
;
2196 this->spi_cb
.data
= data
;
2197 this->spi_lock
->unlock(this->spi_lock
);
2201 * Destroy all entries
2203 static void destroy_all_entries(private_ike_sa_manager_t
*this)
2205 enumerator_t
*enumerator
;
2209 enumerator
= create_table_enumerator(this);
2210 while (enumerator
->enumerate(enumerator
, &entry
, &segment
))
2212 charon
->bus
->set_sa(charon
->bus
, entry
->ike_sa
);
2213 if (entry
->half_open
)
2215 remove_half_open(this, entry
);
2217 if (entry
->my_id
&& entry
->other_id
)
2219 remove_connected_peers(this, entry
);
2221 if (entry
->init_hash
.ptr
)
2223 remove_init_hash(this, entry
->init_hash
);
2225 remove_entry_at((private_enumerator_t
*)enumerator
);
2226 entry_destroy(entry
);
2228 enumerator
->destroy(enumerator
);
2229 charon
->bus
->set_sa(charon
->bus
, NULL
);
2232 METHOD(ike_sa_manager_t
, flush
, void,
2233 private_ike_sa_manager_t
*this)
2235 enumerator_t
*enumerator
;
2239 lock_all_segments(this);
2240 DBG2(DBG_MGR
, "going to destroy IKE_SA manager and all managed IKE_SA's");
2241 /* Step 1: drive out all waiting threads */
2242 DBG2(DBG_MGR
, "set driveout flags for all stored IKE_SA's");
2243 enumerator
= create_table_enumerator(this);
2244 while (enumerator
->enumerate(enumerator
, &entry
, &segment
))
2246 /* do not accept new threads, drive out waiting threads */
2247 entry
->driveout_new_threads
= TRUE
;
2248 entry
->driveout_waiting_threads
= TRUE
;
2250 enumerator
->destroy(enumerator
);
2251 DBG2(DBG_MGR
, "wait for all threads to leave IKE_SA's");
2252 /* Step 2: wait until all are gone */
2253 enumerator
= create_table_enumerator(this);
2254 while (enumerator
->enumerate(enumerator
, &entry
, &segment
))
2256 while (entry
->waiting_threads
|| entry
->checked_out
)
2259 entry
->condvar
->broadcast(entry
->condvar
);
2260 /* go sleeping until they are gone */
2261 entry
->condvar
->wait(entry
->condvar
, this->segments
[segment
].mutex
);
2264 enumerator
->destroy(enumerator
);
2265 DBG2(DBG_MGR
, "delete all IKE_SA's");
2266 /* Step 3: initiate deletion of all IKE_SAs */
2267 enumerator
= create_table_enumerator(this);
2268 while (enumerator
->enumerate(enumerator
, &entry
, &segment
))
2270 charon
->bus
->set_sa(charon
->bus
, entry
->ike_sa
);
2271 entry
->ike_sa
->delete(entry
->ike_sa
, TRUE
);
2273 enumerator
->destroy(enumerator
);
2275 DBG2(DBG_MGR
, "destroy all entries");
2276 /* Step 4: destroy all entries */
2277 destroy_all_entries(this);
2278 unlock_all_segments(this);
2280 this->spi_lock
->write_lock(this->spi_lock
);
2281 DESTROY_IF(this->rng
);
2283 this->spi_cb
.cb
= NULL
;
2284 this->spi_cb
.data
= NULL
;
2285 this->spi_lock
->unlock(this->spi_lock
);
2288 METHOD(ike_sa_manager_t
, destroy
, void,
2289 private_ike_sa_manager_t
*this)
2293 /* in case new SAs were checked in after flush() was called */
2294 lock_all_segments(this);
2295 destroy_all_entries(this);
2296 unlock_all_segments(this);
2298 free(this->ike_sa_table
);
2299 free(this->half_open_table
);
2300 free(this->connected_peers_table
);
2301 free(this->init_hashes_table
);
2302 for (i
= 0; i
< this->segment_count
; i
++)
2304 this->segments
[i
].mutex
->destroy(this->segments
[i
].mutex
);
2305 this->half_open_segments
[i
].lock
->destroy(this->half_open_segments
[i
].lock
);
2306 this->connected_peers_segments
[i
].lock
->destroy(this->connected_peers_segments
[i
].lock
);
2307 this->init_hashes_segments
[i
].mutex
->destroy(this->init_hashes_segments
[i
].mutex
);
2309 free(this->segments
);
2310 free(this->half_open_segments
);
2311 free(this->connected_peers_segments
);
2312 free(this->init_hashes_segments
);
2314 this->spi_lock
->destroy(this->spi_lock
);
2319 * This function returns the next-highest power of two for the given number.
2320 * The algorithm works by setting all bits on the right-hand side of the most
2321 * significant 1 to 1 and then increments the whole number so it rolls over
2322 * to the nearest power of two. Note: returns 0 for n == 0
2324 static u_int
get_nearest_powerof2(u_int n
)
2329 for (i
= 1; i
< sizeof(u_int
) * 8; i
<<= 1)
2337 * Described in header.
2339 ike_sa_manager_t
*ike_sa_manager_create()
2341 private_ike_sa_manager_t
*this;
2346 .checkout
= _checkout
,
2347 .checkout_new
= _checkout_new
,
2348 .checkout_by_message
= _checkout_by_message
,
2349 .checkout_by_config
= _checkout_by_config
,
2350 .checkout_by_id
= _checkout_by_id
,
2351 .checkout_by_name
= _checkout_by_name
,
2352 .new_initiator_spi
= _new_initiator_spi
,
2353 .check_uniqueness
= _check_uniqueness
,
2354 .has_contact
= _has_contact
,
2355 .create_enumerator
= _create_enumerator
,
2356 .create_id_enumerator
= _create_id_enumerator
,
2357 .checkin
= _checkin
,
2358 .checkin_and_destroy
= _checkin_and_destroy
,
2359 .get_count
= _get_count
,
2360 .get_half_open_count
= _get_half_open_count
,
2362 .set_spi_cb
= _set_spi_cb
,
2363 .destroy
= _destroy
,
2367 this->rng
= lib
->crypto
->create_rng(lib
->crypto
, RNG_WEAK
);
2368 if (this->rng
== NULL
)
2370 DBG1(DBG_MGR
, "manager initialization failed, no RNG supported");
2374 this->spi_lock
= rwlock_create(RWLOCK_TYPE_DEFAULT
);
2376 this->ikesa_limit
= lib
->settings
->get_int(lib
->settings
,
2377 "%s.ikesa_limit", 0, lib
->ns
);
2379 this->table_size
= get_nearest_powerof2(lib
->settings
->get_int(
2380 lib
->settings
, "%s.ikesa_table_size",
2381 DEFAULT_HASHTABLE_SIZE
, lib
->ns
));
2382 this->table_size
= max(1, min(this->table_size
, MAX_HASHTABLE_SIZE
));
2383 this->table_mask
= this->table_size
- 1;
2385 this->segment_count
= get_nearest_powerof2(lib
->settings
->get_int(
2386 lib
->settings
, "%s.ikesa_table_segments",
2387 DEFAULT_SEGMENT_COUNT
, lib
->ns
));
2388 this->segment_count
= max(1, min(this->segment_count
, this->table_size
));
2389 this->segment_mask
= this->segment_count
- 1;
2391 this->ike_sa_table
= calloc(this->table_size
, sizeof(table_item_t
*));
2392 this->segments
= (segment_t
*)calloc(this->segment_count
, sizeof(segment_t
));
2393 for (i
= 0; i
< this->segment_count
; i
++)
2395 this->segments
[i
].mutex
= mutex_create(MUTEX_TYPE_RECURSIVE
);
2398 /* we use the same table parameters for the table to track half-open SAs */
2399 this->half_open_table
= calloc(this->table_size
, sizeof(table_item_t
*));
2400 this->half_open_segments
= calloc(this->segment_count
, sizeof(shareable_segment_t
));
2401 for (i
= 0; i
< this->segment_count
; i
++)
2403 this->half_open_segments
[i
].lock
= rwlock_create(RWLOCK_TYPE_DEFAULT
);
2406 /* also for the hash table used for duplicate tests */
2407 this->connected_peers_table
= calloc(this->table_size
, sizeof(table_item_t
*));
2408 this->connected_peers_segments
= calloc(this->segment_count
, sizeof(shareable_segment_t
));
2409 for (i
= 0; i
< this->segment_count
; i
++)
2411 this->connected_peers_segments
[i
].lock
= rwlock_create(RWLOCK_TYPE_DEFAULT
);
2414 /* and again for the table of hashes of seen initial IKE messages */
2415 this->init_hashes_table
= calloc(this->table_size
, sizeof(table_item_t
*));
2416 this->init_hashes_segments
= calloc(this->segment_count
, sizeof(segment_t
));
2417 for (i
= 0; i
< this->segment_count
; i
++)
2419 this->init_hashes_segments
[i
].mutex
= mutex_create(MUTEX_TYPE_RECURSIVE
);
2422 this->reuse_ikesa
= lib
->settings
->get_bool(lib
->settings
,
2423 "%s.reuse_ikesa", TRUE
, lib
->ns
);
2424 return &this->public;