]>
Commit | Line | Data |
---|---|---|
a3aeddca | 1 | /* |
fcd0e88b | 2 | * $Id: radix.c,v 1.14 2001/08/03 16:55:21 wessels Exp $ |
a3aeddca | 3 | * |
4 | * DEBUG: section 53 Radix tree data structure implementation | |
5 | * AUTHOR: NetBSD Derived | |
6 | * | |
2b6662ba | 7 | * SQUID Web Proxy Cache http://www.squid-cache.org/ |
e25c139f | 8 | * ---------------------------------------------------------- |
a3aeddca | 9 | * |
2b6662ba | 10 | * Squid is the result of efforts by numerous individuals from |
11 | * the Internet community; see the CONTRIBUTORS file for full | |
12 | * details. Many organizations have provided support for Squid's | |
13 | * development; see the SPONSORS file for full details. Squid is | |
14 | * Copyrighted (C) 2001 by the Regents of the University of | |
15 | * California; see the COPYRIGHT file for full details. Squid | |
16 | * incorporates software developed and/or copyrighted by other | |
17 | * sources; see the CREDITS file for full details. | |
a3aeddca | 18 | * |
19 | * This program is free software; you can redistribute it and/or modify | |
20 | * it under the terms of the GNU General Public License as published by | |
21 | * the Free Software Foundation; either version 2 of the License, or | |
22 | * (at your option) any later version. | |
23 | * | |
24 | * This program is distributed in the hope that it will be useful, | |
25 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
26 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
27 | * GNU General Public License for more details. | |
28 | * | |
29 | * You should have received a copy of the GNU General Public License | |
30 | * along with this program; if not, write to the Free Software | |
cbdec147 | 31 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA. |
e25c139f | 32 | * |
a3aeddca | 33 | */ |
34 | ||
35 | ||
76f729d7 | 36 | /* |
37 | * Copyright (c) 1988, 1989, 1993 | |
a3aeddca | 38 | * The Regents of the University of California. All rights reserved. |
76f729d7 | 39 | * |
40 | * Redistribution and use in source and binary forms, with or without | |
41 | * modification, are permitted provided that the following conditions | |
42 | * are met: | |
43 | * 1. Redistributions of source code must retain the above copyright | |
44 | * notice, this list of conditions and the following disclaimer. | |
45 | * 2. Redistributions in binary form must reproduce the above copyright | |
46 | * notice, this list of conditions and the following disclaimer in the | |
47 | * documentation and/or other materials provided with the distribution. | |
48 | * 3. All advertising materials mentioning features or use of this software | |
49 | * must display the following acknowledgement: | |
a3aeddca | 50 | * This product includes software developed by the University of |
51 | * California, Berkeley and its contributors. | |
76f729d7 | 52 | * 4. Neither the name of the University nor the names of its contributors |
53 | * may be used to endorse or promote products derived from this software | |
54 | * without specific prior written permission. | |
55 | * | |
56 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND | |
57 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |
58 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | |
59 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | |
60 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | |
61 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | |
62 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | |
63 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | |
64 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | |
65 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | |
66 | * SUCH DAMAGE. | |
67 | * | |
a3aeddca | 68 | * @(#)radix.c 8.4 (Berkeley) 11/2/94 |
76f729d7 | 69 | */ |
70 | ||
76f729d7 | 71 | #include "config.h" |
72 | ||
a3aeddca | 73 | #if HAVE_UNISTD_H |
74 | #include <unistd.h> | |
75 | #endif | |
76 | #if HAVE_STDLIB_H | |
76f729d7 | 77 | #include <stdlib.h> |
76f729d7 | 78 | #endif |
a3aeddca | 79 | #if HAVE_STDIO_H |
80 | #include <stdio.h> | |
81 | #endif | |
82 | #if HAVE_SYS_TYPES_H | |
83 | #include <sys/types.h> | |
84 | #endif | |
85 | #if HAVE_CTYPE_H | |
86 | #include <ctype.h> | |
87 | #endif | |
88 | #if HAVE_ERRNO_H | |
89 | #include <errno.h> | |
90 | #endif | |
91 | #if HAVE_FCNTL_H | |
92 | #include <fcntl.h> | |
93 | #endif | |
94 | #if HAVE_GRP_H | |
95 | #include <grp.h> | |
76f729d7 | 96 | #endif |
a3aeddca | 97 | #if HAVE_GNUMALLOC_H |
98 | #include <gnumalloc.h> | |
99 | #elif HAVE_MALLOC_H && !defined(_SQUID_FREEBSD_) && !defined(_SQUID_NEXT_) | |
100 | #include <malloc.h> | |
101 | #endif | |
102 | #if HAVE_MEMORY_H | |
103 | #include <memory.h> | |
104 | #endif | |
105 | #if HAVE_SYS_PARAM_H | |
106 | #include <sys/param.h> | |
107 | #endif | |
91515c1a | 108 | #if HAVE_ASSERT_H |
109 | #include <assert.h> | |
110 | #endif | |
111 | ||
a3aeddca | 112 | #include "util.h" |
113 | ||
114 | #include "radix.h" | |
76f729d7 | 115 | |
/* Maximum key length in bytes this tree will handle; presumably set
 * by the initialization code elsewhere in this file -- TODO confirm. */
int max_keylen;
struct radix_mask *rn_mkfreelist;	/* free list of radix_mask records (MKGet/MKFree) */
struct radix_node_head *mask_rnhead;	/* auxiliary tree holding canonical netmasks */
static char *addmask_key;	/* scratch key buffer used by rn_addmask() */
/* normal_chars[b] is the byte with the top b bits set; used by
 * rn_addmask() to recognize "normal" (contiguous) masks. */
static unsigned char normal_chars[] =
{0, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe, 0xFF};
static char *rn_zeros, *rn_ones;	/* all-zero / all-one keys, max_keylen bytes each */

/* Root of the netmask tree. */
#define rn_masktop (mask_rnhead->rnh_treetop)
#undef Bcmp
/* memcmp wrapper that tolerates a zero length (memcmp args may then be invalid). */
#define Bcmp(a, b, l) (l == 0 ? 0 : memcmp((caddr_t)(a), (caddr_t)(b), (u_long)l))
76f729d7 | 127 | /* |
128 | * The data structure for the keys is a radix tree with one way | |
129 | * branching removed. The index rn_b at an internal node n represents a bit | |
130 | * position to be tested. The tree is arranged so that all descendants | |
131 | * of a node n have keys whose bits all agree up to position rn_b - 1. | |
132 | * (We say the index of n is rn_b.) | |
133 | * | |
134 | * There is at least one descendant which has a one bit at position rn_b, | |
135 | * and at least one with a zero there. | |
136 | * | |
137 | * A route is determined by a pair of key and mask. We require that the | |
138 | * bit-wise logical and of the key and mask to be the key. | |
139 | * We define the index of a route to associated with the mask to be | |
140 | * the first bit number in the mask where 0 occurs (with bit number 0 | |
141 | * representing the highest order bit). | |
142 | * | |
143 | * We say a mask is normal if every bit is 0, past the index of the mask. | |
144 | * If a node n has a descendant (k, m) with index(m) == index(n) == rn_b, | |
145 | * and m is a normal mask, then the route applies to every descendant of n. | |
146 | * If the index(m) < rn_b, this implies the trailing last few bits of k | |
147 | * before bit b are all 0, (and hence consequently true of every descendant | |
148 | * of n), so the route applies to all descendants of the node as well. | |
149 | * | |
150 | * Similar logic shows that a non-normal mask m such that | |
151 | * index(m) <= index(n) could potentially apply to many children of n. | |
152 | * Thus, for each non-host route, we attach its mask to a list at an internal | |
153 | * node as high in the tree as we can go. | |
154 | * | |
155 | * The present version of the code makes use of normal routes in short- | |
156 | * circuiting an explict mask and compare operation when testing whether | |
157 | * a key satisfies a normal route, and also in remembering the unique leaf | |
158 | * that governs a subtree. | |
159 | */ | |
160 | ||
161 | struct radix_node * | |
162 | rn_search(v_arg, head) | |
a3aeddca | 163 | void *v_arg; |
164 | struct radix_node *head; | |
76f729d7 | 165 | { |
a3aeddca | 166 | register struct radix_node *x; |
167 | register caddr_t v; | |
168 | ||
169 | for (x = head, v = v_arg; x->rn_b >= 0;) { | |
170 | if (x->rn_bmask & v[x->rn_off]) | |
171 | x = x->rn_r; | |
172 | else | |
173 | x = x->rn_l; | |
174 | } | |
175 | return (x); | |
922c4930 | 176 | } |
76f729d7 | 177 | |
178 | struct radix_node * | |
179 | rn_search_m(v_arg, head, m_arg) | |
a3aeddca | 180 | struct radix_node *head; |
181 | void *v_arg, *m_arg; | |
76f729d7 | 182 | { |
a3aeddca | 183 | register struct radix_node *x; |
184 | register caddr_t v = v_arg, m = m_arg; | |
185 | ||
186 | for (x = head; x->rn_b >= 0;) { | |
187 | if ((x->rn_bmask & m[x->rn_off]) && | |
188 | (x->rn_bmask & v[x->rn_off])) | |
189 | x = x->rn_r; | |
190 | else | |
191 | x = x->rn_l; | |
192 | } | |
193 | return x; | |
922c4930 | 194 | } |
76f729d7 | 195 | |
196 | int | |
197 | rn_refines(m_arg, n_arg) | |
a3aeddca | 198 | void *m_arg, *n_arg; |
76f729d7 | 199 | { |
a3aeddca | 200 | register caddr_t m = m_arg, n = n_arg; |
201 | register caddr_t lim, lim2 = lim = n + *(u_char *) n; | |
202 | int longer = (*(u_char *) n++) - (int) (*(u_char *) m++); | |
203 | int masks_are_equal = 1; | |
204 | ||
205 | if (longer > 0) | |
206 | lim -= longer; | |
207 | while (n < lim) { | |
208 | if (*n & ~(*m)) | |
209 | return 0; | |
210 | if (*n++ != *m++) | |
211 | masks_are_equal = 0; | |
212 | } | |
213 | while (n < lim2) | |
214 | if (*n++) | |
215 | return 0; | |
216 | if (masks_are_equal && (longer < 0)) | |
217 | for (lim2 = m - longer; m < lim2;) | |
218 | if (*m++) | |
219 | return 1; | |
220 | return (!masks_are_equal); | |
76f729d7 | 221 | } |
222 | ||
48ebcb22 | 223 | struct radix_node * |
76f729d7 | 224 | rn_lookup(v_arg, m_arg, head) |
a3aeddca | 225 | void *v_arg, *m_arg; |
226 | struct radix_node_head *head; | |
76f729d7 | 227 | { |
a3aeddca | 228 | register struct radix_node *x; |
229 | caddr_t netmask = 0; | |
76f729d7 | 230 | |
a3aeddca | 231 | if (m_arg) { |
232 | if ((x = rn_addmask(m_arg, 1, head->rnh_treetop->rn_off)) == 0) | |
233 | return (0); | |
234 | netmask = x->rn_key; | |
235 | } | |
236 | x = rn_match(v_arg, head); | |
237 | if (x && netmask) { | |
238 | while (x && x->rn_mask != netmask) | |
239 | x = x->rn_dupedkey; | |
240 | } | |
241 | return x; | |
76f729d7 | 242 | } |
243 | ||
244 | static | |
164f7660 | 245 | int |
a3aeddca | 246 | rn_satsifies_leaf(trial, leaf, skip) |
247 | char *trial; | |
248 | register struct radix_node *leaf; | |
249 | int skip; | |
76f729d7 | 250 | { |
a3aeddca | 251 | register char *cp = trial, *cp2 = leaf->rn_key, *cp3 = leaf->rn_mask; |
252 | char *cplim; | |
253 | int length = min(*(u_char *) cp, *(u_char *) cp2); | |
254 | ||
255 | if (cp3 == 0) | |
256 | cp3 = rn_ones; | |
257 | else | |
258 | length = min(length, *(u_char *) cp3); | |
259 | cplim = cp + length; | |
260 | cp3 += skip; | |
261 | cp2 += skip; | |
262 | for (cp += skip; cp < cplim; cp++, cp2++, cp3++) | |
263 | if ((*cp ^ *cp2) & *cp3) | |
264 | return 0; | |
265 | return 1; | |
76f729d7 | 266 | } |
267 | ||
/*
 * rn_match: best-match lookup of key v_arg in the tree.
 * Descends to a leaf, checks for an exact (host) match, and otherwise
 * computes how many leading bits matched, then consults the leaf's
 * duped-key chain and finally the mask lists (rn_mklist) attached to
 * ancestors on the way back up, looking for a network route that
 * covers the key.  Returns the matching leaf, or 0 if none applies.
 */
struct radix_node *
rn_match(v_arg, head)
    void *v_arg;
    struct radix_node_head *head;
{
    caddr_t v = v_arg;
    register struct radix_node *t = head->rnh_treetop, *x;
    register caddr_t cp = v, cp2;
    caddr_t cplim;
    struct radix_node *saved_t, *top = t;
    int off = t->rn_off, vlen = *(u_char *) cp, matched_off;
    register int test, b, rn_b;

    /*
     * Open code rn_search(v, top) to avoid overhead of extra
     * subroutine call.
     */
    for (; t->rn_b >= 0;) {
	if (t->rn_bmask & cp[t->rn_off])
	    t = t->rn_r;
	else
	    t = t->rn_l;
    }
    /*
     * See if we match exactly as a host destination
     * or at least learn how many bits match, for normal mask finesse.
     *
     * It doesn't hurt us to limit how many bytes to check
     * to the length of the mask, since if it matches we had a genuine
     * match and the leaf we have is the most specific one anyway;
     * if it didn't match with a shorter length it would fail
     * with a long one. This wins big for class B&C netmasks which
     * are probably the most common case...
     */
    if (t->rn_mask)
	vlen = *(u_char *) t->rn_mask;
    cp += off;
    cp2 = t->rn_key + off;
    cplim = v + vlen;
    for (; cp < cplim; cp++, cp2++)
	if (*cp != *cp2)
	    goto on1;
    /*
     * This extra grot is in case we are explicitly asked
     * to look up the default. Ugh!
     */
    if ((t->rn_flags & RNF_ROOT) && t->rn_dupedkey)
	t = t->rn_dupedkey;
    return t;
  on1:
    test = (*cp ^ *cp2) & 0xff;	/* find first bit that differs */
    for (b = 7; (test >>= 1) > 0;)
	b--;
    /* b = absolute index of the first differing bit; rn_b is the
     * corresponding leaf-style (negative) index used for comparisons
     * against node/mask indices below. */
    matched_off = cp - v;
    b += matched_off << 3;
    rn_b = -1 - b;
    /*
     * If there is a host route in a duped-key chain, it will be first.
     */
    if ((saved_t = t)->rn_mask == 0)
	t = t->rn_dupedkey;
    for (; t; t = t->rn_dupedkey)
	/*
	 * Even if we don't match exactly as a host,
	 * we may match if the leaf we wound up at is
	 * a route to a net.
	 */
	if (t->rn_flags & RNF_NORMAL) {
	    /* Normal mask: index comparison alone decides coverage. */
	    if (rn_b <= t->rn_b)
		return t;
	} else if (rn_satsifies_leaf(v, t, matched_off))
	    return t;
    t = saved_t;
    /* start searching up the tree */
    do {
	register struct radix_mask *m;
	t = t->rn_p;
	if ((m = t->rn_mklist)) {
	    /*
	     * If non-contiguous masks ever become important
	     * we can restore the masking and open coding of
	     * the search and satisfaction test and put the
	     * calculation of "off" back before the "do".
	     */
	    do {
		if (m->rm_flags & RNF_NORMAL) {
		    if (rn_b <= m->rm_b)
			return (m->rm_leaf);
		} else {
		    /* Non-normal mask: redo a masked search from this
		     * node and verify the candidate leaf explicitly. */
		    off = min(t->rn_off, matched_off);
		    x = rn_search_m(v, t, m->rm_mask);
		    while (x && x->rn_mask != m->rm_mask)
			x = x->rn_dupedkey;
		    if (x && rn_satsifies_leaf(v, x, off))
			return x;
		}
	    } while ((m = m->rm_mklist));
	}
    } while (t != top);
    return 0;
}
a3aeddca | 369 | |
76f729d7 | 370 | #ifdef RN_DEBUG |
a3aeddca | 371 | int rn_nodenum; |
372 | struct radix_node *rn_clist; | |
373 | int rn_saveinfo; | |
374 | int rn_debug = 1; | |
76f729d7 | 375 | #endif |
376 | ||
377 | struct radix_node * | |
378 | rn_newpair(v, b, nodes) | |
a3aeddca | 379 | void *v; |
380 | int b; | |
381 | struct radix_node nodes[2]; | |
76f729d7 | 382 | { |
a3aeddca | 383 | register struct radix_node *tt = nodes, *t = tt + 1; |
384 | t->rn_b = b; | |
385 | t->rn_bmask = 0x80 >> (b & 7); | |
386 | t->rn_l = tt; | |
387 | t->rn_off = b >> 3; | |
388 | tt->rn_b = -1; | |
389 | tt->rn_key = (caddr_t) v; | |
390 | tt->rn_p = t; | |
391 | tt->rn_flags = t->rn_flags = RNF_ACTIVE; | |
76f729d7 | 392 | #ifdef RN_DEBUG |
a3aeddca | 393 | tt->rn_info = rn_nodenum++; |
394 | t->rn_info = rn_nodenum++; | |
395 | tt->rn_twin = t; | |
396 | tt->rn_ybro = rn_clist; | |
397 | rn_clist = tt; | |
76f729d7 | 398 | #endif |
a3aeddca | 399 | return t; |
76f729d7 | 400 | } |
401 | ||
/*
 * rn_insert: insert key v_arg into the tree using the caller-supplied
 * node pair `nodes'.  If an identical key already exists, *dupentry is
 * set to 1 and the existing leaf is returned; otherwise *dupentry is 0
 * and the new leaf is returned after splicing a new interior node at
 * the highest point where the keys diverge.
 */
struct radix_node *
rn_insert(v_arg, head, dupentry, nodes)
    void *v_arg;
    struct radix_node_head *head;
    int *dupentry;
    struct radix_node nodes[2];
{
    caddr_t v = v_arg;
    struct radix_node *top = head->rnh_treetop;
    int head_off = top->rn_off, vlen = (int) *((u_char *) v);
    register struct radix_node *t = rn_search(v_arg, top);
    register caddr_t cp = v + head_off;
    register int b;
    struct radix_node *tt;
    /*
     * Find first bit at which v and t->rn_key differ
     */
    {
	register caddr_t cp2 = t->rn_key + head_off;
	register int cmp_res;
	caddr_t cplim = v + vlen;

	while (cp < cplim)
	    if (*cp2++ != *cp++)
		goto on1;
	*dupentry = 1;
	return t;
      on1:
	*dupentry = 0;
	/* b ends up as the absolute index of the first differing bit. */
	cmp_res = (cp[-1] ^ cp2[-1]) & 0xff;
	for (b = (cp - v) << 3; cmp_res; b--)
	    cmp_res >>= 1;
    }
    {
	register struct radix_node *p, *x = top;
	cp = v;
	/* Re-descend until reaching a node whose bit index is >= b;
	 * the (unsigned) cast makes a leaf's negative rn_b compare
	 * large, terminating the loop at leaves too. */
	do {
	    p = x;
	    if (cp[x->rn_off] & x->rn_bmask)
		x = x->rn_r;
	    else
		x = x->rn_l;
	} while (b > (unsigned) x->rn_b);	/* x->rn_b < b && x->rn_b >= 0 */
#ifdef RN_DEBUG
	if (rn_debug)
	    fprintf(stderr, "rn_insert: Going In:\n");
	traverse(p);
#endif
	/* Splice the new interior node t between p and x; the new
	 * leaf tt hangs off whichever side bit b of v selects. */
	t = rn_newpair(v_arg, b, nodes);
	tt = t->rn_l;
	if ((cp[p->rn_off] & p->rn_bmask) == 0)
	    p->rn_l = t;
	else
	    p->rn_r = t;
	x->rn_p = t;
	t->rn_p = p;		/* frees x, p as temp vars below */
	if ((cp[t->rn_off] & t->rn_bmask) == 0) {
	    t->rn_r = x;
	} else {
	    t->rn_r = tt;
	    t->rn_l = x;
	}
#ifdef RN_DEBUG
	if (rn_debug)
	    log(LOG_DEBUG, "rn_insert: Coming Out:\n"), traverse(p);
#endif
    }
    return (tt);
}
471 | ||
/*
 * rn_addmask: canonicalize a netmask by entering it into the global
 * mask tree (mask_rnhead).  Returns the tree node whose rn_key is the
 * canonical copy, so masks can afterwards be compared by pointer.
 * If `search' is non-zero, only look the mask up, never insert.
 * `skip' is the number of leading bytes to ignore (header/length).
 * NOTE: uses the static scratch buffer addmask_key and static
 * last_zeroed state, so this is not reentrant.
 */
struct radix_node *
rn_addmask(n_arg, search, skip)
    int search, skip;
    void *n_arg;
{
    caddr_t netmask = (caddr_t) n_arg;
    register struct radix_node *x;
    register caddr_t cp, cplim;
    register int b = 0, mlen, j;
    int maskduplicated, m0, isnormal;
    struct radix_node *saved_x;
    static int last_zeroed = 0;

    if ((mlen = *(u_char *) netmask) > max_keylen)
	mlen = max_keylen;
    if (skip == 0)
	skip = 1;
    if (mlen <= skip)
	return (mask_rnhead->rnh_nodes);	/* degenerate: all-zero mask */
    /* Build the lookup key: ones over the skipped prefix, then the
     * significant mask bytes. */
    if (skip > 1)
	memcpy(addmask_key + 1, rn_ones + 1, skip - 1);
    if ((m0 = mlen) > skip)
	memcpy(addmask_key + skip, netmask + skip, mlen - skip);
    /*
     * Trim trailing zeroes.
     */
    for (cp = addmask_key + mlen; (cp > addmask_key) && cp[-1] == 0;)
	cp--;
    mlen = cp - addmask_key;
    if (mlen <= skip) {
	if (m0 >= last_zeroed)
	    last_zeroed = mlen;
	return (mask_rnhead->rnh_nodes);
    }
    /* Zero the tail left over from a longer, earlier key. */
    if (m0 < last_zeroed)
	memset(addmask_key + m0, '\0', last_zeroed - m0);
    *addmask_key = last_zeroed = mlen;
    x = rn_search(addmask_key, rn_masktop);
    if (memcmp(addmask_key, x->rn_key, mlen) != 0)
	x = 0;
    if (x || search)
	return (x);
    /* Not found and insertion requested: allocate two radix_nodes plus
     * space for the canonical copy of the mask in one chunk. */
    R_Malloc(x, struct radix_node *, max_keylen + 2 * sizeof(*x));
    if ((saved_x = x) == 0)
	return (0);
    memset(x, '\0', max_keylen + 2 * sizeof(*x));
    netmask = cp = (caddr_t) (x + 2);
    memcpy(cp, addmask_key, mlen);
    x = rn_insert(cp, mask_rnhead, &maskduplicated, x);
    if (maskduplicated) {
	/* Should be impossible: the rn_search above missed it. */
	fprintf(stderr, "rn_addmask: mask impossibly already in tree");
	Free(saved_x);
	return (x);
    }
    /*
     * Calculate index of mask, and check for normalcy.
     */
    cplim = netmask + mlen;
    isnormal = 1;
    for (cp = netmask + skip; (cp < cplim) && *(u_char *) cp == 0xff;)
	cp++;
    if (cp != cplim) {
	/* A normal mask's single partial byte must be a contiguous
	 * run of high bits (see normal_chars) and must be last. */
	for (j = 0x80; (j & *cp) != 0; j >>= 1)
	    b++;
	if (*cp != normal_chars[b] || cp != (cplim - 1))
	    isnormal = 0;
    }
    b += (cp - netmask) << 3;
    x->rn_b = -1 - b;
    if (isnormal)
	x->rn_flags |= RNF_NORMAL;
    return (x);
}
545 | ||
a3aeddca | 546 | static int /* XXX: arbitrary ordering for non-contiguous masks */ |
76f729d7 | 547 | rn_lexobetter(m_arg, n_arg) |
a3aeddca | 548 | void *m_arg, *n_arg; |
76f729d7 | 549 | { |
a3aeddca | 550 | register u_char *mp = m_arg, *np = n_arg, *lim; |
551 | ||
552 | if (*mp > *np) | |
553 | return 1; /* not really, but need to check longer one first */ | |
554 | if (*mp == *np) | |
555 | for (lim = mp + *mp; mp < lim;) | |
556 | if (*mp++ > *np++) | |
557 | return 1; | |
558 | return 0; | |
76f729d7 | 559 | } |
560 | ||
561 | static struct radix_mask * | |
562 | rn_new_radix_mask(tt, next) | |
a3aeddca | 563 | register struct radix_node *tt; |
564 | register struct radix_mask *next; | |
76f729d7 | 565 | { |
a3aeddca | 566 | register struct radix_mask *m; |
76f729d7 | 567 | |
a3aeddca | 568 | MKGet(m); |
569 | if (m == 0) { | |
570 | fprintf(stderr, "Mask for route not entered\n"); | |
571 | return (0); | |
572 | } | |
573 | memset(m, '\0', sizeof *m); | |
574 | m->rm_b = tt->rn_b; | |
575 | m->rm_flags = tt->rn_flags; | |
576 | if (tt->rn_flags & RNF_NORMAL) | |
577 | m->rm_leaf = tt; | |
578 | else | |
579 | m->rm_mask = tt->rn_mask; | |
580 | m->rm_mklist = next; | |
581 | tt->rn_mklist = m; | |
582 | return m; | |
76f729d7 | 583 | } |
584 | ||
/*
 * rn_addroute: add a (key, netmask) route using the caller-supplied
 * pair `treenodes'.  The mask is first canonicalized via rn_addmask();
 * duplicate keys are chained through rn_dupedkey sorted most-specific
 * first; finally the mask annotation is lifted to the highest ancestor
 * whose subtree it can cover.  Returns the new leaf, or 0 if the exact
 * (key, mask) pair already exists or the mask could not be entered.
 */
struct radix_node *
rn_addroute(v_arg, n_arg, head, treenodes)
    void *v_arg, *n_arg;
    struct radix_node_head *head;
    struct radix_node treenodes[2];
{
    caddr_t v = (caddr_t) v_arg, netmask = (caddr_t) n_arg;
    register struct radix_node *t, *x = NULL, *tt;
    struct radix_node *saved_tt, *top = head->rnh_treetop;
    short b = 0, b_leaf = 0;
    int keyduplicated;
    caddr_t mmask;
    struct radix_mask *m, **mp;

    /*
     * In dealing with non-contiguous masks, there may be
     * many different routes which have the same mask.
     * We will find it useful to have a unique pointer to
     * the mask to speed avoiding duplicate references at
     * nodes and possibly save time in calculating indices.
     */
    if (netmask) {
	if ((x = rn_addmask(netmask, 0, top->rn_off)) == 0)
	    return (0);
	b_leaf = x->rn_b;
	b = -1 - x->rn_b;	/* absolute index of the mask */
	netmask = x->rn_key;	/* canonical pointer from the mask tree */
    }
    /*
     * Deal with duplicated keys: attach node to previous instance
     */
    saved_tt = tt = rn_insert(v, head, &keyduplicated, treenodes);
    if (keyduplicated) {
	/* Find the insertion point in the duped-key chain, keeping it
	 * sorted most-specific-mask first. */
	for (t = tt; tt; t = tt, tt = tt->rn_dupedkey) {
	    if (tt->rn_mask == netmask)
		return (0);	/* exact (key, mask) already present */
	    if (netmask == 0 ||
		(tt->rn_mask &&
		    ((b_leaf < tt->rn_b) ||	/* index(netmask) > node */
			rn_refines(netmask, tt->rn_mask) ||
			rn_lexobetter(netmask, tt->rn_mask))))
		break;
	}
	/*
	 * If the mask is not duplicated, we wouldn't
	 * find it among possible duplicate key entries
	 * anyway, so the above test doesn't hurt.
	 *
	 * We sort the masks for a duplicated key the same way as
	 * in a masklist -- most specific to least specific.
	 * This may require the unfortunate nuisance of relocating
	 * the head of the list.
	 */
	if (tt == saved_tt) {
	    struct radix_node *xx = x;
	    /* link in at head of list */
	    (tt = treenodes)->rn_dupedkey = t;
	    tt->rn_flags = t->rn_flags;
	    tt->rn_p = x = t->rn_p;
	    if (x->rn_l == t)
		x->rn_l = tt;
	    else
		x->rn_r = tt;
	    saved_tt = tt;
	    x = xx;		/* restore x (the canonical mask node) */
	} else {
	    (tt = treenodes)->rn_dupedkey = t->rn_dupedkey;
	    t->rn_dupedkey = tt;
	}
#ifdef RN_DEBUG
	t = tt + 1;
	tt->rn_info = rn_nodenum++;
	t->rn_info = rn_nodenum++;
	tt->rn_twin = t;
	tt->rn_ybro = rn_clist;
	rn_clist = tt;
#endif
	tt->rn_key = (caddr_t) v;
	tt->rn_b = -1;
	tt->rn_flags = RNF_ACTIVE;
    }
    /*
     * Put mask in tree.
     */
    if (netmask) {
	tt->rn_mask = netmask;
	tt->rn_b = x->rn_b;
	tt->rn_flags |= x->rn_flags & RNF_NORMAL;
    }
    t = saved_tt->rn_p;
    if (keyduplicated)
	goto on2;
    b_leaf = -1 - t->rn_b;
    if (t->rn_r == saved_tt)
	x = t->rn_l;
    else
	x = t->rn_r;
    /* Promote general routes from below */
    if (x->rn_b < 0) {
	for (mp = &t->rn_mklist; x; x = x->rn_dupedkey)
	    if (x->rn_mask && (x->rn_b >= b_leaf) && x->rn_mklist == 0) {
		if ((*mp = m = rn_new_radix_mask(x, 0)))
		    mp = &m->rm_mklist;
	    }
    } else if (x->rn_mklist) {
	/*
	 * Skip over masks whose index is > that of new node
	 */
	for (mp = &x->rn_mklist; (m = *mp); mp = &m->rm_mklist)
	    if (m->rm_b >= b_leaf)
		break;
	t->rn_mklist = m;
	*mp = 0;
    }
  on2:
    /* Add new route to highest possible ancestor's list */
    if ((netmask == 0) || (b > t->rn_b))
	return tt;		/* can't lift at all */
    b_leaf = tt->rn_b;
    /* Climb while the mask's index still covers the ancestor. */
    do {
	x = t;
	t = t->rn_p;
    } while (b <= t->rn_b && x != top);
    /*
     * Search through routes associated with node to
     * insert new route according to index.
     * Need same criteria as when sorting dupedkeys to avoid
     * double loop on deletion.
     */
    for (mp = &x->rn_mklist; (m = *mp); mp = &m->rm_mklist) {
	if (m->rm_b < b_leaf)
	    continue;
	if (m->rm_b > b_leaf)
	    break;
	if (m->rm_flags & RNF_NORMAL) {
	    mmask = m->rm_leaf->rn_mask;
	    if (tt->rn_flags & RNF_NORMAL) {
		fprintf(stderr,
		    "Non-unique normal route, mask not entered");
		return tt;
	    }
	} else
	    mmask = m->rm_mask;
	if (mmask == netmask) {
	    /* Shared annotation: bump the reference count. */
	    m->rm_refs++;
	    tt->rn_mklist = m;
	    return tt;
	}
	if (rn_refines(netmask, mmask) || rn_lexobetter(netmask, mmask))
	    break;
    }
    *mp = rn_new_radix_mask(tt, *mp);
    return tt;
}
739 | ||
/*
 * rn_delete: remove the route with key v_arg and mask netmask_arg.
 * Undoes everything rn_addroute() did: drops the mask annotation from
 * whichever ancestor holds it, unlinks the leaf (or its duped-key
 * entry), collapses the now-unneeded interior node, and demotes any
 * mask list the collapsed node carried.  Returns the removed node pair
 * (for the caller to free), or 0 on any failure/inconsistency.
 */
struct radix_node *
rn_delete(v_arg, netmask_arg, head)
    void *v_arg, *netmask_arg;
    struct radix_node_head *head;
{
    register struct radix_node *t, *p, *x, *tt;
    struct radix_mask *m, *saved_m, **mp;
    struct radix_node *dupedkey, *saved_tt, *top;
    caddr_t v, netmask;
    int b, head_off, vlen;

    v = v_arg;
    netmask = netmask_arg;
    x = head->rnh_treetop;
    tt = rn_search(v, x);
    head_off = x->rn_off;
    vlen = *(u_char *) v;
    saved_tt = tt;
    top = x;
    if (tt == 0 ||
	memcmp(v + head_off, tt->rn_key + head_off, vlen - head_off))
	return (0);		/* key not in the tree */
    /*
     * Delete our route from mask lists.
     */
    if (netmask) {
	/* Canonicalize the mask, then find the matching duped-key entry. */
	if ((x = rn_addmask(netmask, 1, head_off)) == 0)
	    return (0);
	netmask = x->rn_key;
	while (tt->rn_mask != netmask)
	    if ((tt = tt->rn_dupedkey) == 0)
		return (0);
    }
    if (tt->rn_mask == 0 || (saved_m = m = tt->rn_mklist) == 0)
	goto on1;
    if (tt->rn_flags & RNF_NORMAL) {
	if (m->rm_leaf != tt || m->rm_refs > 0) {
	    fprintf(stderr, "rn_delete: inconsistent annotation\n");
	    return 0;		/* dangling ref could cause disaster */
	}
    } else {
	if (m->rm_mask != tt->rn_mask) {
	    fprintf(stderr, "rn_delete: inconsistent annotation\n");
	    goto on1;
	}
	if (--m->rm_refs >= 0)
	    goto on1;		/* annotation still shared; keep it */
    }
    b = -1 - tt->rn_b;
    t = saved_tt->rn_p;
    if (b > t->rn_b)
	goto on1;		/* Wasn't lifted at all */
    /* Climb to the ancestor holding our annotation (mirrors the climb
     * in rn_addroute) and unlink it from that node's mask list. */
    do {
	x = t;
	t = t->rn_p;
    } while (b <= t->rn_b && x != top);
    for (mp = &x->rn_mklist; (m = *mp); mp = &m->rm_mklist)
	if (m == saved_m) {
	    *mp = m->rm_mklist;
	    MKFree(m);
	    break;
	}
    if (m == 0) {
	fprintf(stderr, "rn_delete: couldn't find our annotation\n");
	if (tt->rn_flags & RNF_NORMAL)
	    return (0);		/* Dangling ref to us */
    }
  on1:
    /*
     * Eliminate us from tree
     */
    if (tt->rn_flags & RNF_ROOT)
	return (0);		/* never delete the root sentinels */
#ifdef RN_DEBUG
    /* Get us out of the creation list */
    for (t = rn_clist; t && t->rn_ybro != tt; t = t->rn_ybro) {
    }
    if (t)
	t->rn_ybro = tt->rn_ybro;
#endif
    t = tt->rn_p;
    if ((dupedkey = saved_tt->rn_dupedkey)) {
	/* Key has duplicates: only unlink from the duped-key chain. */
	if (tt == saved_tt) {
	    /* Removing the chain head: promote the next entry. */
	    x = dupedkey;
	    x->rn_p = t;
	    if (t->rn_l == tt)
		t->rn_l = x;
	    else
		t->rn_r = x;
	} else {
	    for (x = p = saved_tt; p && p->rn_dupedkey != tt;)
		p = p->rn_dupedkey;
	    if (p)
		p->rn_dupedkey = tt->rn_dupedkey;
	    else
		fprintf(stderr, "rn_delete: couldn't find us\n");
	}
	/* If our paired interior node is in use elsewhere in the tree,
	 * move its contents into the surviving chain head's twin. */
	t = tt + 1;
	if (t->rn_flags & RNF_ACTIVE) {
#ifndef RN_DEBUG
	    *++x = *t;
	    p = t->rn_p;
#else
	    b = t->rn_info;
	    *++x = *t;
	    t->rn_info = b;
	    p = t->rn_p;
#endif
	    if (p->rn_l == t)
		p->rn_l = x;
	    else
		p->rn_r = x;
	    x->rn_l->rn_p = x;
	    x->rn_r->rn_p = x;
	}
	goto out;
    }
    /* No duplicates: collapse parent t, promoting our sibling x. */
    if (t->rn_l == tt)
	x = t->rn_r;
    else
	x = t->rn_l;
    p = t->rn_p;
    if (p->rn_r == t)
	p->rn_r = x;
    else
	p->rn_l = x;
    x->rn_p = p;
    /*
     * Demote routes attached to us.
     */
    if (t->rn_mklist) {
	if (x->rn_b >= 0) {
	    /* Sibling is interior: append our list to its list. */
	    for (mp = &x->rn_mklist; (m = *mp);)
		mp = &m->rm_mklist;
	    *mp = t->rn_mklist;
	} else {
	    /* If there are any key,mask pairs in a sibling
	     * duped-key chain, some subset will appear sorted
	     * in the same order attached to our mklist */
	    for (m = t->rn_mklist; m && x; x = x->rn_dupedkey)
		if (m == x->rn_mklist) {
		    struct radix_mask *mm = m->rm_mklist;
		    x->rn_mklist = 0;
		    if (--(m->rm_refs) < 0)
			MKFree(m);
		    m = mm;
		}
#if RN_DEBUG
	    if (m)
		fprintf(stderr, "%s %x at %x\n",
		    "rn_delete: Orphaned Mask", (int) m, (int) x);
#else
	    assert(m == NULL);
#endif
	}
    }
    /*
     * We may be holding an active internal node in the tree.
     */
    x = tt + 1;
    if (t != x) {
	/* Move x's contents into t so the pair tt/tt+1 can be freed. */
#ifndef RN_DEBUG
	*t = *x;
#else
	b = t->rn_info;
	*t = *x;
	t->rn_info = b;
#endif
	t->rn_l->rn_p = t;
	t->rn_r->rn_p = t;
	p = x->rn_p;
	if (p->rn_l == x)
	    p->rn_l = t;
	else
	    p->rn_r = t;
    }
  out:
    tt->rn_flags &= ~RNF_ACTIVE;
    tt[1].rn_flags &= ~RNF_ACTIVE;
    return (tt);
}
921 | ||
/*
 * Apply function f to every non-root leaf in the tree under head h,
 * passing the opaque argument w through.  Stops early and returns f's
 * (non-zero) result if any call fails; returns 0 after a full walk.
 *
 * The walk is safe against f deleting the very node it is handed: the
 * successor leaf is computed *before* the current run of leaves is
 * processed, so a deletion cannot invalidate the cursor.
 */
int
rn_walktree(h, f, w)
     struct radix_node_head *h;
     int (*f) ();
     void *w;
{
    int error;
    struct radix_node *base, *next;
    register struct radix_node *rn = h->rnh_treetop;
    /*
     * This gets complicated because we may delete the node
     * while applying the function f to it, so we need to calculate
     * the successor node in advance.
     */
    /* First time through node, go left: descend to the leftmost leaf
     * (internal nodes have rn_b >= 0, leaves have rn_b < 0). */
    while (rn->rn_b >= 0)
	rn = rn->rn_l;
    for (;;) {
	base = rn;
	/* If at right child go back up, otherwise, go right */
	while (rn->rn_p->rn_r == rn && (rn->rn_flags & RNF_ROOT) == 0)
	    rn = rn->rn_p;
	/* Find the next *leaf* since next node might vanish, too */
	for (rn = rn->rn_p->rn_r; rn->rn_b >= 0;)
	    rn = rn->rn_l;
	next = rn;
	/* Process leaves: walk the duped-key chain hanging off this
	 * leaf, skipping the embedded root markers. */
	while ((rn = base)) {
	    base = rn->rn_dupedkey;
	    if (!(rn->rn_flags & RNF_ROOT) && (error = (*f) (rn, w)))
		return (error);
	}
	rn = next;
	/* Reaching a root leaf means the rightmost end of the tree. */
	if (rn->rn_flags & RNF_ROOT)
	    return (0);
    }
    /* NOTREACHED */
}
960 | ||
/*
 * Allocate and initialize a radix_node_head, storing it through *head.
 * 'off' is the bit offset (in bits from the start of the key) at which
 * comparisons begin for this tree.
 *
 * Builds the canonical 4.4BSD empty tree: an internal root node whose
 * left child is a leaf keyed on all-zeros (rn_zeros) and whose right
 * child is a leaf keyed on all-ones (rn_ones); all three carry
 * RNF_ROOT so real routes are always inserted strictly between them.
 *
 * Returns 1 on success or if *head was already set; 0 if the
 * allocation fails.
 */
int
rn_inithead(head, off)
     void **head;
     int off;
{
    register struct radix_node_head *rnh;
    register struct radix_node *t, *tt, *ttt;
    /* Already initialized: report success without touching it. */
    if (*head)
	return (1);
    R_Malloc(rnh, struct radix_node_head *, sizeof(*rnh));
    if (rnh == 0)
	return (0);
    memset(rnh, '\0', sizeof(*rnh));
    *head = rnh;
    /* rn_newpair builds an internal node plus its left leaf inside
     * rnh_nodes[0..1]; the leaf key is the all-zeros string. */
    t = rn_newpair(rn_zeros, off, rnh->rnh_nodes);
    ttt = rnh->rnh_nodes + 2;
    t->rn_r = ttt;
    t->rn_p = t;		/* the top node is its own parent */
    tt = t->rn_l;
    tt->rn_flags = t->rn_flags = RNF_ROOT | RNF_ACTIVE;
    tt->rn_b = -1 - off;	/* leaf marker encoding the skip offset */
    /* Right end-marker leaf: a copy of the left one, re-keyed to the
     * all-ones string (must be copied AFTER tt's flags/bit are set). */
    *ttt = *tt;
    ttt->rn_key = rn_ones;
    /* Export the generic operations through the head's method table. */
    rnh->rnh_addaddr = rn_addroute;
    rnh->rnh_deladdr = rn_delete;
    rnh->rnh_matchaddr = rn_match;
    rnh->rnh_lookup = rn_lookup;
    rnh->rnh_walktree = rn_walktree;
    rnh->rnh_treetop = t;
    return (1);
}
992 | ||
993 | void | |
994 | rn_init() | |
995 | { | |
a3aeddca | 996 | char *cp, *cplim; |
76f729d7 | 997 | #ifdef KERNEL |
a3aeddca | 998 | struct domain *dom; |
76f729d7 | 999 | |
a3aeddca | 1000 | for (dom = domains; dom; dom = dom->dom_next) |
1001 | if (dom->dom_maxrtkey > max_keylen) | |
1002 | max_keylen = dom->dom_maxrtkey; | |
76f729d7 | 1003 | #endif |
a3aeddca | 1004 | if (max_keylen == 0) { |
1005 | fprintf(stderr, | |
1006 | "rn_init: radix functions require max_keylen be set\n"); | |
1007 | return; | |
1008 | } | |
1009 | R_Malloc(rn_zeros, char *, 3 * max_keylen); | |
1010 | if (rn_zeros == NULL) { | |
1011 | fprintf(stderr, "rn_init failed.\n"); | |
1012 | exit(-1); | |
1013 | } | |
1014 | memset(rn_zeros, '\0', 3 * max_keylen); | |
1015 | rn_ones = cp = rn_zeros + max_keylen; | |
1016 | addmask_key = cplim = rn_ones + max_keylen; | |
1017 | while (cp < cplim) | |
1018 | *cp++ = -1; | |
1019 | if (rn_inithead((void **) &mask_rnhead, 0) == 0) { | |
1020 | fprintf(stderr, "rn_init2 failed.\n"); | |
1021 | exit(-1); | |
1022 | } | |
76f729d7 | 1023 | } |