/* Linuxthreads - a simple clone()-based implementation of Posix */
/* threads for Linux. */
/* Copyright (C) 1998 Xavier Leroy (Xavier.Leroy@inria.fr) */
/* */
/* This program is free software; you can redistribute it and/or */
/* modify it under the terms of the GNU Library General Public License */
/* as published by the Free Software Foundation; either version 2 */
/* of the License, or (at your option) any later version. */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the */
/* GNU Library General Public License for more details. */

/* Internal locks */

#include <errno.h>
#include <sched.h>
#include <time.h>
#include <stdlib.h>
#include <limits.h>
#include "pthread.h"
#include "internals.h"
#include "spinlock.h"
#include "restart.h"

static void __pthread_acquire(int * spinlock);

static inline void __pthread_release(int * spinlock)
{
  WRITE_MEMORY_BARRIER();
  *spinlock = __LT_SPINLOCK_INIT;
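  /* The empty asm below acts as a compiler-level barrier: its "m" operands
     tell the compiler that *spinlock is read and written here, so the store
     above cannot be cached in a register or moved past this point. */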
  __asm __volatile ("" : "=m" (*spinlock) : "m" (*spinlock));
}


/* The status field of a spinlock is a pointer whose least significant
   bit is a locked flag.

   Thus the field values have the following meanings:

   status == 0:       spinlock is free
   status == 1:       spinlock is taken; no thread is waiting on it

   (status & 1) == 1: spinlock is taken and (status & ~1L) is a
                      pointer to the first waiting thread; other
                      waiting threads are linked via the p_nextlock
                      field.
   (status & 1) == 0: same as above, but spinlock is not taken.

   The waiting list is not sorted by priority order.
   Actually, we always insert at the head of the list (the only insertion
   that can be performed without locking).
   For __pthread_unlock, we perform a linear search in the list
   to find the highest-priority, oldest waiting thread.
   This is safe because there are no concurrent __pthread_unlock
   operations -- only the thread that locked the mutex can unlock it. */
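
/* Illustrative sketch (not part of the original sources): under the encoding
   described above, a status word could be decoded as

     int locked = lock->__status & 1L;
     pthread_descr first_waiter = (pthread_descr) (lock->__status & ~1L);

   where first_waiter == NULL means that no thread is waiting. */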


void internal_function __pthread_lock(struct _pthread_fastlock * lock,
                                      pthread_descr self)
{
#if defined HAS_COMPARE_AND_SWAP
  long oldstatus, newstatus;
  int successful_seizure, spurious_wakeup_count;
  int spin_count;
#endif

#if defined TEST_FOR_COMPARE_AND_SWAP
  if (!__pthread_has_cas)
#endif
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
  {
    __pthread_acquire(&lock->__spinlock);
    return;
  }
#endif

#if defined HAS_COMPARE_AND_SWAP
  /* First try it without preparation.  Maybe it's a completely
     uncontested lock. */
  if (lock->__status == 0 && __compare_and_swap (&lock->__status, 0, 1))
    return;

  spurious_wakeup_count = 0;
  spin_count = 0;

  /* On SMP, try spinning to get the lock. */

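  /* On the compare-and-swap path the __spinlock field is not used as a lock;
     it holds a running estimate of how many spins past acquisitions needed.
     max_count is derived from that estimate (capped at
     MAX_ADAPTIVE_SPIN_COUNT), and after spinning the estimate is updated by
     an eighth of the difference, i.e. an exponential moving average. */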
  if (__pthread_smp_kernel) {
    int max_count = lock->__spinlock * 2 + 10;

    if (max_count > MAX_ADAPTIVE_SPIN_COUNT)
      max_count = MAX_ADAPTIVE_SPIN_COUNT;

    for (spin_count = 0; spin_count < max_count; spin_count++) {
      if (((oldstatus = lock->__status) & 1) == 0) {
        if(__compare_and_swap(&lock->__status, oldstatus, oldstatus | 1))
        {
          if (spin_count)
            lock->__spinlock += (spin_count - lock->__spinlock) / 8;
          READ_MEMORY_BARRIER();
          return;
        }
      }
#ifdef BUSY_WAIT_NOP
      BUSY_WAIT_NOP;
#endif
      __asm __volatile ("" : "=m" (lock->__status) : "m" (lock->__status));
    }

    lock->__spinlock += (spin_count - lock->__spinlock) / 8;
  }

again:

  /* No luck, try once more or suspend. */

  do {
    oldstatus = lock->__status;
    successful_seizure = 0;

    if ((oldstatus & 1) == 0) {
      newstatus = oldstatus | 1;
      successful_seizure = 1;
    } else {
      if (self == NULL)
        self = thread_self();
      newstatus = (long) self | 1;
    }

    if (self != NULL) {
      THREAD_SETMEM(self, p_nextlock, (pthread_descr) (oldstatus));
      /* Make sure the store in p_nextlock completes before performing
         the compare-and-swap */
      MEMORY_BARRIER();
    }
  } while(! __compare_and_swap(&lock->__status, oldstatus, newstatus));

  /* Suspend with guard against spurious wakeup.
     This can happen in pthread_cond_timedwait_relative, when the thread
     wakes up due to timeout and is still on the condvar queue, and then
     locks the queue to remove itself. At that point it may still be on the
     queue, and may be resumed by a condition signal. */

  if (!successful_seizure) {
    for (;;) {
      suspend(self);
      if (self->p_nextlock != NULL) {
        /* Count resumes that don't belong to us. */
        spurious_wakeup_count++;
        continue;
      }
      break;
    }
    goto again;
  }

  /* Put back any resumes we caught that don't belong to us. */
  while (spurious_wakeup_count--)
    restart(self);

  READ_MEMORY_BARRIER();
#endif
}

int __pthread_unlock(struct _pthread_fastlock * lock)
{
#if defined HAS_COMPARE_AND_SWAP
  long oldstatus;
  pthread_descr thr, * ptr, * maxptr;
  int maxprio;
#endif

#if defined TEST_FOR_COMPARE_AND_SWAP
  if (!__pthread_has_cas)
#endif
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
  {
    __pthread_release(&lock->__spinlock);
    return 0;
  }
#endif

#if defined HAS_COMPARE_AND_SWAP
  WRITE_MEMORY_BARRIER();

again:
  while ((oldstatus = lock->__status) == 1) {
    if (__compare_and_swap_with_release_semantics(&lock->__status,
                                                  oldstatus, 0))
      return 0;
  }

  /* Find thread in waiting queue with maximal priority */
  ptr = (pthread_descr *) &lock->__status;
  thr = (pthread_descr) (oldstatus & ~1L);
  maxprio = 0;
  maxptr = ptr;
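  /* maxptr records the address of the link that points to the current best
     candidate (either &lock->__status itself or some thread's p_nextlock
     field), so that the chosen thread can be unlinked further below. */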

  /* Before we iterate over the wait queue, we need to execute
     a read barrier, otherwise we may read stale contents of nodes that may
     just have been inserted by other processors. One read barrier is enough
     to ensure we have a stable list; we don't need one for each pointer chase
     through the list, because we are the owner of the lock; other threads
     can only add nodes at the front; if a front node is consistent,
     the ones behind it must also be. */

  READ_MEMORY_BARRIER();

  while (thr != 0) {
    if (thr->p_priority >= maxprio) {
      maxptr = ptr;
      maxprio = thr->p_priority;
    }
    ptr = &(thr->p_nextlock);
    thr = (pthread_descr)((long)(thr->p_nextlock) & ~1L);
  }

  /* Remove max prio thread from waiting list. */
  if (maxptr == (pthread_descr *) &lock->__status) {
    /* If max prio thread is at head, remove it with compare-and-swap
       to guard against concurrent lock operation. This removal
       also has the side effect of marking the lock as released
       because the new status comes from thr->p_nextlock whose
       least significant bit is clear. */
    thr = (pthread_descr) (oldstatus & ~1L);
    if (! __compare_and_swap_with_release_semantics
            (&lock->__status, oldstatus, (long)(thr->p_nextlock) & ~1L))
      goto again;
  } else {
    /* No risk of concurrent access, remove max prio thread normally.
       But in this case we must also flip the least significant bit
       of the status to mark the lock as released. */
    thr = (pthread_descr)((long)*maxptr & ~1L);
    *maxptr = thr->p_nextlock;

    /* Ensure deletion from linked list completes before we
       release the lock. */
    WRITE_MEMORY_BARRIER();

    do {
      oldstatus = lock->__status;
    } while (!__compare_and_swap_with_release_semantics(&lock->__status,
                                                        oldstatus, oldstatus & ~1L));
  }

  /* Wake up the selected waiting thread. The woken thread can check
     its own p_nextlock field for NULL to detect that it has been removed. No
     barrier is needed here, since restart() and suspend() take
     care of memory synchronization. */

  thr->p_nextlock = NULL;
  restart(thr);

  return 0;
#endif
}

/*
 * Alternate fastlocks do not queue threads directly. Instead, they queue
 * these wait queue node structures. When a timed wait wakes up due to
 * a timeout, it can leave its wait node in the queue (because there
 * is no safe way to remove it from the queue). Some other thread will
 * deallocate the abandoned node.
 */
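
/* Illustrative sketch (not part of the original sources): with the lock held
   and two alternate/timed waiters queued, the status word forms a singly
   linked list that ends in the value 1 ("locked, no further waiters"):

     lock->__status --> wait_node B --> wait_node A --> (struct wait_node *) 1

   B was enqueued after A, because insertion always happens at the head. */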


struct wait_node {
  struct wait_node *next;   /* Next node in null terminated linked list */
  pthread_descr thr;        /* The thread waiting with this node */
  int abandoned;            /* Atomic flag */
};

static long wait_node_free_list;
static int wait_node_free_list_spinlock;

/* Allocate a new node from the head of the free list using an atomic
   operation, or else using malloc if that list is empty.  A fundamental
   assumption here is that we can safely access wait_node_free_list->next.
   That's because we never free nodes once we allocate them, so a pointer to a
   node remains valid indefinitely. */

static struct wait_node *wait_node_alloc(void)
{
  struct wait_node *new_node = 0;

  __pthread_acquire(&wait_node_free_list_spinlock);
  if (wait_node_free_list != 0) {
    new_node = (struct wait_node *) wait_node_free_list;
    wait_node_free_list = (long) new_node->next;
  }
  WRITE_MEMORY_BARRIER();
  __pthread_release(&wait_node_free_list_spinlock);

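  /* Fall back to the heap when the free list was empty.  Note that sizeof
     does not evaluate its operand, so "sizeof *wait_node_alloc()" is simply
     sizeof (struct wait_node); no recursive call takes place. */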
  if (new_node == 0)
    return malloc(sizeof *wait_node_alloc());

  return new_node;
}

/* Return a node to the head of the free list using an atomic
   operation. */

static void wait_node_free(struct wait_node *wn)
{
  __pthread_acquire(&wait_node_free_list_spinlock);
  wn->next = (struct wait_node *) wait_node_free_list;
  wait_node_free_list = (long) wn;
  WRITE_MEMORY_BARRIER();
  __pthread_release(&wait_node_free_list_spinlock);
  return;
}

#if defined HAS_COMPARE_AND_SWAP

/* Remove a wait node from the specified queue.  It is assumed
   that the removal takes place concurrently with only atomic insertions at
   the head of the queue. */

static void wait_node_dequeue(struct wait_node **pp_head,
                              struct wait_node **pp_node,
                              struct wait_node *p_node)
{
  /* If the node is being deleted from the head of the
     list, it must be deleted using atomic compare-and-swap.
     Otherwise it can be deleted in the straightforward way. */

  if (pp_node == pp_head) {
    /* We don't need a read barrier between these next two loads,
       because it is assumed that the caller has already ensured
       the stability of *p_node with respect to p_node. */

    long oldvalue = (long) p_node;
    long newvalue = (long) p_node->next;

    if (__compare_and_swap((long *) pp_node, oldvalue, newvalue))
      return;

    /* Oops! Compare and swap failed, which means the node is
       no longer first. We delete it using the ordinary method. But we don't
       know the identity of the node which now holds the pointer to the node
       being deleted, so we must search from the beginning. */

    for (pp_node = pp_head; p_node != *pp_node; ) {
      pp_node = &(*pp_node)->next;
      READ_MEMORY_BARRIER(); /* Stabilize *pp_node for next iteration. */
    }
  }

  *pp_node = p_node->next;
  return;
}

#endif

void __pthread_alt_lock(struct _pthread_fastlock * lock,
                        pthread_descr self)
{
#if defined HAS_COMPARE_AND_SWAP
  long oldstatus, newstatus;
#endif
  struct wait_node wait_node;

#if defined TEST_FOR_COMPARE_AND_SWAP
  if (!__pthread_has_cas)
#endif
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
  {
    int suspend_needed = 0;
    __pthread_acquire(&lock->__spinlock);

    if (lock->__status == 0)
      lock->__status = 1;
    else {
      if (self == NULL)
        self = thread_self();

      wait_node.abandoned = 0;
      wait_node.next = (struct wait_node *) lock->__status;
      wait_node.thr = self;
      lock->__status = (long) &wait_node;
      suspend_needed = 1;
    }

    __pthread_release(&lock->__spinlock);

    if (suspend_needed)
      suspend (self);
    return;
  }
#endif

#if defined HAS_COMPARE_AND_SWAP
  do {
    oldstatus = lock->__status;
    if (oldstatus == 0) {
      newstatus = 1;
    } else {
      if (self == NULL)
        self = thread_self();
      wait_node.thr = self;
      newstatus = (long) &wait_node;
    }
    wait_node.abandoned = 0;
    wait_node.next = (struct wait_node *) oldstatus;
    /* Make sure the store in wait_node.next completes before performing
       the compare-and-swap */
    MEMORY_BARRIER();
  } while(! __compare_and_swap(&lock->__status, oldstatus, newstatus));

  /* Suspend. Note that unlike in __pthread_lock, we don't worry
     here about spurious wakeup. That's because this lock is not
     used in situations where that can happen; the restart can
     only come from the previous lock owner. */

  if (oldstatus != 0)
    suspend(self);

  READ_MEMORY_BARRIER();
#endif
}

/* Timed-out lock operation; returns 0 to indicate timeout. */

int __pthread_alt_timedlock(struct _pthread_fastlock * lock,
                            pthread_descr self, const struct timespec *abstime)
{
  long oldstatus = 0;
#if defined HAS_COMPARE_AND_SWAP
  long newstatus;
#endif
  struct wait_node *p_wait_node = wait_node_alloc();

  /* Out of memory, just give up and do ordinary lock. */
  if (p_wait_node == 0) {
    __pthread_alt_lock(lock, self);
    return 1;
  }

#if defined TEST_FOR_COMPARE_AND_SWAP
  if (!__pthread_has_cas)
#endif
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
  {
    __pthread_acquire(&lock->__spinlock);

    if (lock->__status == 0)
      lock->__status = 1;
    else {
      if (self == NULL)
        self = thread_self();

      p_wait_node->abandoned = 0;
      p_wait_node->next = (struct wait_node *) lock->__status;
      p_wait_node->thr = self;
      lock->__status = (long) p_wait_node;
      oldstatus = 1; /* force suspend */
    }

    __pthread_release(&lock->__spinlock);
    goto suspend;
  }
#endif

#if defined HAS_COMPARE_AND_SWAP
  do {
    oldstatus = lock->__status;
    if (oldstatus == 0) {
      newstatus = 1;
    } else {
      if (self == NULL)
        self = thread_self();
      p_wait_node->thr = self;
      newstatus = (long) p_wait_node;
    }
    p_wait_node->abandoned = 0;
    p_wait_node->next = (struct wait_node *) oldstatus;
    /* Make sure the store in p_wait_node->next completes before performing
       the compare-and-swap */
    MEMORY_BARRIER();
  } while(! __compare_and_swap(&lock->__status, oldstatus, newstatus));
#endif

#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
  suspend:
#endif

  /* If we did not get the lock, do a timed suspend. If we wake up due
     to a timeout, then there is a race; the old lock owner may try
     to remove us from the queue. This race is resolved by us and the owner
     doing an atomic testandset() to change the state of the wait node from 0
     to 1. If we succeed, then it's a timeout and we abandon the node in the
     queue. If we fail, it means the owner gave us the lock. */

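  /* If the timed suspend wins the race (the testandset below succeeds), the
     abandoned node stays in the queue and is eventually freed by some later
     unlocker; if it loses, the owner has already dequeued the node and sent a
     restart, which is consumed below before the node is returned to the free
     list. */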
  if (oldstatus != 0) {
    if (timedsuspend(self, abstime) == 0) {
      if (!testandset(&p_wait_node->abandoned))
        return 0; /* Timeout! */

      /* Eat outstanding resume from owner, otherwise wait_node_free() below
         will race with owner's wait_node_dequeue(). */
      suspend(self);
    }
  }

  wait_node_free(p_wait_node);

  READ_MEMORY_BARRIER();

  return 1; /* Got the lock! */
}

void __pthread_alt_unlock(struct _pthread_fastlock *lock)
{
  struct wait_node *p_node, **pp_node, *p_max_prio, **pp_max_prio;
  struct wait_node ** const pp_head = (struct wait_node **) &lock->__status;
  int maxprio;

  WRITE_MEMORY_BARRIER();

#if defined TEST_FOR_COMPARE_AND_SWAP
  if (!__pthread_has_cas)
#endif
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
  {
    __pthread_acquire(&lock->__spinlock);
  }
#endif

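  /* The loop below scans the whole wait queue: abandoned nodes are unlinked
     and returned to the free list, and among the remaining waiters the one
     whose thread has the highest priority is remembered.  If every node was
     abandoned, or a head removal races with a newly arriving waiter, the
     scan simply restarts from the top. */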
  while (1) {

    /* If no threads are waiting for this lock, try to just
       atomically release it. */
#if defined TEST_FOR_COMPARE_AND_SWAP
    if (!__pthread_has_cas)
#endif
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
    {
      if (lock->__status == 0 || lock->__status == 1) {
        lock->__status = 0;
        break;
      }
    }
#endif

#if defined TEST_FOR_COMPARE_AND_SWAP
    else
#endif

#if defined HAS_COMPARE_AND_SWAP
    {
      long oldstatus = lock->__status;
      if (oldstatus == 0 || oldstatus == 1) {
        if (__compare_and_swap_with_release_semantics (&lock->__status, oldstatus, 0))
          break;
        else
          continue;
      }
    }
#endif

    /* Process the entire queue of wait nodes. Remove all abandoned
       wait nodes and put them into the global free queue, and
       remember the one unabandoned node which refers to the thread
       having the highest priority. */

    pp_max_prio = pp_node = pp_head;
    p_max_prio = p_node = *pp_head;
    maxprio = INT_MIN;

    READ_MEMORY_BARRIER(); /* Prevent access to stale data through p_node */

    while (p_node != (struct wait_node *) 1) {
      int prio;

      if (p_node->abandoned) {
        /* Remove abandoned node. */
#if defined TEST_FOR_COMPARE_AND_SWAP
        if (!__pthread_has_cas)
#endif
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
          *pp_node = p_node->next;
#endif
#if defined TEST_FOR_COMPARE_AND_SWAP
        else
#endif
#if defined HAS_COMPARE_AND_SWAP
          wait_node_dequeue(pp_head, pp_node, p_node);
#endif
        wait_node_free(p_node);
        /* Note that the next assignment may take us to the beginning
           of the queue, to newly inserted nodes, if pp_node == pp_head.
           In that case we need a memory barrier to stabilize the first of
           these new nodes. */
        p_node = *pp_node;
        if (pp_node == pp_head)
          READ_MEMORY_BARRIER(); /* No stale reads through p_node */
        continue;
      } else if ((prio = p_node->thr->p_priority) >= maxprio) {
        /* Otherwise remember it if its thread has a higher or equal priority
           compared to that of any node seen thus far. */
        maxprio = prio;
        pp_max_prio = pp_node;
        p_max_prio = p_node;
      }

      /* This cannot jump backward in the list, so no further read
         barrier is needed. */
      pp_node = &p_node->next;
      p_node = *pp_node;
    }

    /* If all threads abandoned, go back to top */
    if (maxprio == INT_MIN)
      continue;

    ASSERT (p_max_prio != (struct wait_node *) 1);

    /* Now we want to remove the max priority thread's wait node from
       the list. Before we can do this, we must atomically try to change the
       node's abandon state from zero to nonzero. If we succeed, that means we
       have the node that we will wake up. If we fail, it means the
       thread timed out and abandoned the node, in which case we repeat the
       whole unlock operation. */


    if (!testandset(&p_max_prio->abandoned)) {
#if defined TEST_FOR_COMPARE_AND_SWAP
      if (!__pthread_has_cas)
#endif
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
        *pp_max_prio = p_max_prio->next;
#endif
#if defined TEST_FOR_COMPARE_AND_SWAP
      else
#endif
#if defined HAS_COMPARE_AND_SWAP
        wait_node_dequeue(pp_head, pp_max_prio, p_max_prio);
#endif
      restart(p_max_prio->thr);
      break;
    }
  }

#if defined TEST_FOR_COMPARE_AND_SWAP
  if (!__pthread_has_cas)
#endif
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
  {
    __pthread_release(&lock->__spinlock);
  }
#endif
}


/* Compare-and-swap emulation with a spinlock */

#ifdef TEST_FOR_COMPARE_AND_SWAP
int __pthread_has_cas = 0;
#endif

#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP

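/* Emulated compare-and-swap: callers pass the address of a separate spinlock
   word that serializes every emulated operation on the target word, so the
   exchange is atomic only with respect to accesses that go through the same
   spinlock. */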
int __pthread_compare_and_swap(long * ptr, long oldval, long newval,
                               int * spinlock)
{
  int res;

  __pthread_acquire(spinlock);

  if (*ptr == oldval) {
    *ptr = newval; res = 1;
  } else {
    res = 0;
  }

  __pthread_release(spinlock);

  return res;
}

#endif

/* The retry strategy is as follows:
   - We test and set the spinlock MAX_SPIN_COUNT times, calling
     sched_yield() each time.  This gives ample opportunity for other
     threads with priority >= our priority to make progress and
     release the spinlock.
   - If a thread with priority < our priority owns the spinlock,
     calling sched_yield() repeatedly is useless, since we're preventing
     the owning thread from making progress and releasing the spinlock.
     So, after MAX_SPIN_COUNT attempts, we suspend the calling thread
     using nanosleep().  This again should give time to the owning thread
     for releasing the spinlock.
     Notice that the nanosleep() interval must not be too small,
     since the kernel does busy-waiting for short intervals in a realtime
     process (!).  The smallest duration that guarantees thread
     suspension is currently 2ms.
   - When nanosleep() returns, we try again, doing MAX_SPIN_COUNT
     sched_yield(), then sleeping again if needed. */
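
/* __pthread_acquire below implements this strategy.  MAX_SPIN_COUNT and
   SPIN_SLEEP_DURATION are tuning constants defined elsewhere in the
   linuxthreads sources (internals.h, as far as I can tell). */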

static void __pthread_acquire(int * spinlock)
{
  int cnt = 0;
  struct timespec tm;

  READ_MEMORY_BARRIER();

  while (testandset(spinlock)) {
    if (cnt < MAX_SPIN_COUNT) {
      sched_yield();
      cnt++;
    } else {
      tm.tv_sec = 0;
      tm.tv_nsec = SPIN_SLEEP_DURATION;
      nanosleep(&tm, NULL);
      cnt = 0;
    }
  }
}