]>
git.ipfire.org Git - thirdparty/gcc.git/blob - libitm/method-wbetl.cc
1 /* Copyright (C) 2009, 2011 Free Software Foundation, Inc.
2 Contributed by Richard Henderson <rth@redhat.com>.
4 This file is part of the GNU Transactional Memory Library (libitm).
6 Libitm is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 Libitm is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
13 FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 Under Section 7 of GPL version 3, you are granted additional
17 permissions described in the GCC Runtime Library Exception, version
18 3.1, as published by the Free Software Foundation.
20 You should have received a copy of the GNU General Public License and
21 a copy of the GCC Runtime Library Exception along with this program;
22 see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23 <http://www.gnu.org/licenses/>. */
/* NOTE(review): this extraction is fragmentary -- the embedded original
   line numbers jump (34, 42, 48, ...), so many member declarations are
   missing.  Comments below describe only the fragments that are visible.
   wbetl = write-back, encounter-time locking TM method dispatch.  */
31 class wbetl_dispatch
: public abi_dispatch
/* Initial capacity (entry count) of both the read set and write set;
   used by the constructor below.  */
34 static const size_t RW_SET_SIZE
= 4096;
/* Read-set storage: array of r_entry plus its current element count.  */
42 r_entry
*m_rset_entries
;
43 size_t m_rset_nb_entries
;
48 /* There's a hashtable where the locks are held, so multiple
49 cachelines can hash to a given bucket. This link points to the
50 possible next cacheline that also hashes to this bucket. */
53 /* Every entry in this bucket (accessed by NEXT) has the same LOCK
/* Write-set storage: array of w_entry, its element count, and a flag
   (set before a RESTART_REALLOCATE) telling reinit() to grow the array.  */
62 w_entry
*m_wset_entries
;
63 size_t m_wset_nb_entries
;
65 bool m_wset_reallocate
;
/* Shadow-copy cacheline allocation state: current page and the index of
   the next free line within it (see do_write_lock).  */
70 gtm_cacheline_page
*m_cache_page
;
71 unsigned m_n_cache_page
;
/* Private helpers: membership test for the write set, and a linear
   search of the read set by lock address.  */
74 bool local_w_entry_p (w_entry
*w
)
;
75 bool has_read (gtm_stmlock
*lock
)
;
/* Internal lock-acquisition primitives used by the virtual barriers.  */
79 gtm_cacheline
*do_write_lock(gtm_cacheline
*)
;
80 gtm_cacheline
*do_after_write_lock(gtm_cacheline
*)
;
81 const gtm_cacheline
*do_read_lock(const gtm_cacheline
*, bool);
/* abi_dispatch virtual interface implemented by this method.  */
86 virtual const gtm_cacheline
*read_lock(const gtm_cacheline
*, ls_modifier
)
;
87 virtual mask_pair
write_lock(gtm_cacheline
*, ls_modifier
)
;
89 virtual bool trycommit();
90 virtual void rollback();
91 virtual void reinit();
93 virtual bool trydropreference (void *, size_t);
96 /* Check if W is one of our write locks. */
/* Returns true iff W points into this transaction's own write-set
   array, i.e. the lock it came from is held by us.  (NOTE(review):
   extraction is fragmentary; the function's signature line and braces
   are partly missing.)  */
99 wbetl_dispatch::local_w_entry_p (w_entry
*w
)
/* Simple pointer-range test against [m_wset_entries,
   m_wset_entries + m_wset_nb_entries).  */
101 return (m_wset_entries
<= w
&& w
< m_wset_entries
+ m_wset_nb_entries
)
;
104 /* Check if stripe has been read previously. */
/* Linear scan of the read set for an entry whose lock pointer matches
   LOCK.  NOTE(review): the return statements are not visible in this
   extraction (original line numbers jump past them).  */
107 wbetl_dispatch::has_read (gtm_stmlock
*lock
)
109 // ??? Consider using an AA tree to lookup the r_set entries.
110 size_t n
= m_rset_nb_entries
;
111 for (size_t i
= 0; i
< n
; ++i
)
/* Match on the lock-word address recorded when the stripe was read.  */
112 if (m_rset_entries
[i
].lock
== lock
)
118 /* Validate read set, i.e. check if all read addresses are still valid now. */
/* NOTE(review): fragmentary extraction -- the success/failure return
   paths are missing here; only the per-entry checks are visible.  */
121 wbetl_dispatch::validate ()
/* Full barrier before re-reading the lock words, so we observe any
   concurrent writers' updates.  */
123 __sync_synchronize ();
125 size_t n
= m_rset_nb_entries
;
126 for (size_t i
= 0; i
< n
; ++i
)
128 r_entry
*r
= &m_rset_entries
[i
]
;
/* Re-load the current value of the stripe's lock word.  */
129 gtm_stmlock l
= *r
->lock
;
131 if (gtm_stmlock_owned_p (l
))
/* Lock word encodes an owner: decode the w_entry pointer.  */
133 w_entry
*w
= (w_entry
*) gtm_stmlock_get_addr (l
)
;
135 // If someone has locked us, it better be by someone in the
/* A foreign owner means a conflicting writer -- validation fails.  */
137 if (!local_w_entry_p (w
))
/* Unowned: the version must still match what we recorded at read
   time, otherwise the stripe was updated since.  */
140 else if (gtm_stmlock_get_version (l
) != r
->version
)
147 /* Extend the snapshot range. */
/* NOTE(review): only the header and the clock read survive in this
   extraction; the revalidation and m_end update that presumably follow
   are missing -- confirm against the full source.  */
150 wbetl_dispatch::extend ()
/* Sample the global commit clock as the candidate new snapshot end.  */
152 gtm_version now
= gtm_get_clock ();
162 /* Acquire a write lock on ADDR. */
/* NOTE(review): heavily fragmentary -- the restart labels, several
   conditionals, and the linked-list walk over same-bucket entries are
   missing from this extraction.  Visible structure: look up the stripe
   lock for ADDR, handle the already-owned case (ours vs. foreign),
   otherwise CAS-acquire the lock and allocate a shadow cacheline.  */
165 wbetl_dispatch::do_write_lock(gtm_cacheline
*addr
)
170 w_entry
*w
, *prev
= NULL
;
/* Hash ADDR to its stripe lock word.  */
172 lock
= gtm_get_stmlock (addr
)
;
176 if (gtm_stmlock_owned_p (l
))
/* Lock word holds a w_entry pointer; decode it.  */
178 w
= (w_entry
*) gtm_stmlock_get_addr (l
)
;
180 /* Did we previously write the same address? */
181 if (local_w_entry_p (w
))
/* We own the bucket: search the chain for an entry with this exact
   cacheline address (chain walk itself not visible here).  */
186 if (addr
== prev
->addr
)
188 if (prev
->next
== NULL
)
193 /* Get version from previous entry write set. */
194 version
= prev
->version
;
196 /* If there's not enough entries, we must reallocate the array,
197 which invalidates all pointers to write set entries, which
198 means we have to restart the transaction. */
199 if (m_wset_nb_entries
== m_wset_size
)
/* Flag reinit() to grow the array, then abort/retry.  */
202 m_wset_reallocate
= true;
203 gtm_tx()->restart (RESTART_REALLOCATE
)
;
206 w
= &m_wset_entries
[m_wset_nb_entries
]
;
/* Foreign owner: conflict -- restart the transaction.  */
210 gtm_tx()->restart (RESTART_LOCKED_WRITE
)
;
/* Unowned lock word: remember its version for the new entry.  */
214 version
= gtm_stmlock_get_version (l
)
;
216 /* We might have read an older version previously. */
220 gtm_tx()->restart (RESTART_VALIDATE_WRITE
)
;
223 /* Extend write set, aborting to reallocate write set entries. */
224 if (m_wset_nb_entries
== m_wset_size
)
227 m_wset_reallocate
= true;
228 gtm_tx()->restart (RESTART_REALLOCATE
)
;
231 /* Acquire the lock. */
232 w
= &m_wset_entries
[m_wset_nb_entries
]
;
/* Build an "owned by W" lock word and try to install it atomically;
   on CAS failure the observed value is re-examined (retry path via
   restart_no_load, label not visible here).  */
233 l2
= gtm_stmlock_set_owned (w
)
;
234 l
= __sync_val_compare_and_swap (lock
, l
, l2
)
;
236 goto restart_no_load
;
/* Record the pre-ownership version in the new write-set entry.  */
246 w
->version
= version
;
/* Allocate a shadow cacheline to buffer the write (write-back).  */
248 gtm_cacheline_page
*page
= m_cache_page
;
249 unsigned index
= m_n_cache_page
;
/* Current page absent or full: start a new page of cachelines.  */
251 if (page
== NULL
|| index
== gtm_cacheline_page::LINES
)
253 gtm_cacheline_page
*npage
= new gtm_cacheline_page
;
255 m_cache_page
= page
= npage
;
260 m_n_cache_page
= index
+ 1;
262 gtm_cacheline
*line
= &page
->lines
[index
]
;
/* Fresh shadow line starts with an empty write mask.  */
264 page
->masks
[index
] = 0;
/* Return the shadow cacheline for ADDR, which the transaction must
   already have write-locked (write-after-write barrier).  NOTE(review):
   fragmentary -- the chain walk and return are not visible here.  */
271 wbetl_dispatch::do_after_write_lock (gtm_cacheline
*addr
)
277 lock
= gtm_get_stmlock (addr
)
;
/* Caller contract: the stripe lock is held...  */
279 assert (gtm_stmlock_owned_p (l
))
;
281 w
= (w_entry
*) gtm_stmlock_get_addr (l
)
;
/* ...and held by US, not a foreign transaction.  */
282 assert (local_w_entry_p (w
))
;
292 /* Acquire a read lock on ADDR. */
/* NOTE(review): fragmentary extraction -- the restart labels, the
   self-owned fast path body, and the final return are missing.
   Visible structure: inspect the stripe lock; restart on foreign
   ownership or stale version; otherwise append to the read set.  */
294 const gtm_cacheline
*
295 wbetl_dispatch::do_read_lock (const gtm_cacheline
*addr
, bool after_read
)
302 lock
= gtm_get_stmlock (addr
)
;
306 if (gtm_stmlock_owned_p (l
))
308 w
= (w_entry
*) gtm_stmlock_get_addr (l
)
;
310 /* Did we previously write the same address? */
311 if (local_w_entry_p (w
))
/* Foreign owner: read/write conflict -- restart.  */
323 gtm_tx()->restart (RESTART_LOCKED_READ
)
;
326 version
= gtm_stmlock_get_version (l
)
;
328 /* If version is no longer valid, re-validate the read set. */
332 gtm_tx()->restart (RESTART_VALIDATE_READ
)
;
336 // Verify that the version has not yet been overwritten. The read
337 // value has not yet been added to read set and may not have been
338 // checked during the extend.
340 // ??? This only makes sense if we're actually reading the value
341 // and returning it now -- which I believe the original TinySTM
342 // did. This doesn't make a whole lot of sense when we're
343 // manipulating cachelines as we are now. Do we need some other
344 // form of lock verification here, or is the validate call in
345 // trycommit sufficient?
347 __sync_synchronize ();
/* Lock word changed under us: retry (label not visible here).  */
352 goto restart_no_load
;
361 /* Add the address and version to the read set. */
/* Read set full: grow it with xrealloc (unlike the write set, entries
   here are not pointed at by lock words, so realloc is safe).  */
362 if (m_rset_nb_entries
== m_rset_size
)
366 m_rset_entries
= (r_entry
*)
367 xrealloc (m_rset_entries
, m_rset_size
* sizeof(r_entry
))
;
369 r
= &m_rset_entries
[m_rset_nb_entries
++];
370 r
->version
= version
;
/* Virtual read barrier: dispatch on the load modifier LTYPE to the
   appropriate internal primitive.  NOTE(review): the switch/case
   skeleton around these returns is missing from this extraction, so
   the LTYPE-to-call mapping below is inferred only from ordering.  */
377 const gtm_cacheline
*
378 wbetl_dispatch::read_lock (const gtm_cacheline
*addr
, ls_modifier ltype
)
385 return do_read_lock (addr
, false);
387 return do_read_lock (addr
, true);
/* Read-after-write / write-after-read paths hand off to the write-lock
   primitives, which require a non-const cacheline.  */
389 return do_after_write_lock (const_cast<gtm_cacheline
*>(addr
))
;
391 return do_write_lock (const_cast<gtm_cacheline
*>(addr
))
;
/* Virtual write barrier: return the shadow cacheline and write-mask for
   ADDR according to the store modifier LTYPE.  NOTE(review): the
   switch/case skeleton is missing from this extraction.  */
397 abi_dispatch::mask_pair
398 wbetl_dispatch::write_lock (gtm_cacheline
*addr
, ls_modifier ltype
)
/* Discard-writes path: point the mask at a sink so stores are dropped.  */
405 return mask_pair (addr
, &mask_sink
)
;
408 line
= do_write_lock (addr
)
;
411 line
= do_after_write_lock (addr
)
;
/* Pair the shadow line with its per-line mask within the page.  */
417 return mask_pair (line
, gtm_cacheline_page::mask_for_page_line (line
))
;
420 /* Commit the transaction. */
/* NOTE(review): fragmentary -- the early-out for read-only
   transactions, the failure return, and the per-bucket NEXT test are
   missing from this extraction.  Visible flow: take a commit
   timestamp, validate if needed, copy shadow lines back, then release
   all write locks with the new version.  */
423 wbetl_dispatch::trycommit ()
425 const size_t n
= m_wset_nb_entries
;
428 /* Get commit timestamp. */
429 gtm_version t
= gtm_inc_clock ();
431 /* Validate only if a concurrent transaction has started since. */
/* If our snapshot start is exactly t-1, no other commit intervened,
   so the read set is trivially still valid.  */
432 if (m_start
!= t
- 1 && !validate ())
435 /* Install new versions. */
436 for (size_t i
= 0; i
< n
; ++i
)
438 w_entry
*w
= &m_wset_entries
[i
]
;
/* Fetch the dirty-byte mask accumulated on the shadow line.  */
439 gtm_cacheline_mask mask
440 = *gtm_cacheline_page::mask_for_page_line (w
->value
)
;
442 /* Filter out any updates that overlap the libitm stack. */
443 mask
= gtm_mask_stack (w
->addr
, mask
)
;
/* Write back only the masked bytes to the real location.  */
445 gtm_cacheline::copy_mask (w
->addr
, w
->value
, mask
)
;
448 /* Only emit barrier after all cachelines are copied. */
449 gtm_cacheline::copy_mask_wb ();
452 for (size_t i
= 0; i
< n
; ++i
)
454 w_entry
*w
= &m_wset_entries
[i
]
;
456 /* Every link along the chain has the same lock, but only
457 bother dropping the lock once per bucket (at the end). */
/* Publish the commit timestamp as the stripe's new version,
   simultaneously releasing ownership.  */
459 *w
->lock
= gtm_stmlock_set_version (t
)
;
463 __sync_synchronize ();
/* Abort: release every write lock we hold, restoring each stripe's
   pre-transaction version (shadow lines are simply abandoned).
   NOTE(review): fragmentary -- the per-bucket NEXT test referenced by
   the comment below is not visible in this extraction.  */
468 wbetl_dispatch::rollback ()
471 const size_t n
= m_wset_nb_entries
;
472 for (size_t i
= 0; i
< n
; ++i
)
474 w_entry
*w
= &m_wset_entries
[i
]
;
476 /* Every link along the chain has the same lock, but only
477 bother dropping the lock once per bucket (at the end). */
/* Restore the version captured in do_write_lock, dropping ownership.  */
479 *w
->lock
= gtm_stmlock_set_version (w
->version
)
;
/* Make the lock releases globally visible.  */
482 __sync_synchronize ();
/* Reset per-transaction state for a restart: empty both sets, grow the
   write-set array if a RESTART_REALLOCATE was requested, trim the
   shadow cacheline pages, and resample the snapshot clock.
   NOTE(review): fragmentary -- the m_wset_size doubling, the page-list
   walk, and the page-free loop are missing from this extraction.  */
486 wbetl_dispatch::reinit ()
488 gtm_cacheline_page
*page
;
490 m_rset_nb_entries
= 0;
491 m_wset_nb_entries
= 0;
/* do_write_lock flagged that the write set overflowed: reallocate now,
   while no lock word points into the (about-to-move) array.  */
493 if (m_wset_reallocate
)
495 m_wset_reallocate
= 0;
496 m_wset_entries
= (w_entry
*)
497 xrealloc (m_wset_entries
, m_wset_size
* sizeof(w_entry
))
;
503 /* Release all but one of the pages of cachelines. */
504 gtm_cacheline_page
*prev
= page
->prev
;
511 /* Start the next cacheline allocation from the beginning. */
/* New snapshot: both ends of the validity range start at "now".  */
515 m_start
= m_end
= gtm_get_clock ();
/* Tear down this dispatch object: free the read/write set arrays
   allocated in the constructor.  NOTE(review): fragmentary -- any
   page cleanup or `delete this` that may follow is not visible.  */
519 wbetl_dispatch::fini ()
522 free (m_rset_entries
)
;
523 free (m_wset_entries
)
;
527 /* Attempt to drop any internal references to PTR. Return TRUE if successful.
529 This is an adaptation of the transactional memcpy function.
531 What we do here is flush out the current transactional content of
532 PTR to real memory, and remove the write mask bits associated with
533 it so future commits will ignore this piece of memory. */
/* NOTE(review): fragmentary extraction -- the success/failure returns,
   the src-pointer advancement between stages, and parts of stage (a)
   are missing; only the three copy stages' cores are visible.  */
536 wbetl_dispatch::trydropreference (void *ptr
, size_t size
)
544 uintptr_t isrc
= (uintptr_t)ptr
;
545 // The position in the source cacheline where *PTR starts.
546 uintptr_t sofs
= isrc
& (CACHELINE_SIZE
- 1);
/* SRC = the cacheline-aligned base containing PTR.  */
548 = reinterpret_cast<gtm_cacheline
*>(isrc
& -CACHELINE_SIZE
)
;
549 unsigned char *dst
= (unsigned char *)ptr
;
550 abi_dispatch::mask_pair pair
;
552 // If we're trying to drop a reference, we should already have a
553 // write lock on it. If we don't have one, there's no work to do.
554 if (!gtm_stmlock_owned_p (*gtm_get_stmlock (src
)))
557 // We copy the data in three stages:
559 // (a) Copy stray bytes at the beginning that are smaller than a
/* Stage (a): the leading partial cacheline.  SLEFT = bytes from SOFS
   to the end of the line; MIN = how many of them belong to PTR.  */
563 size_t sleft
= CACHELINE_SIZE
- sofs
;
564 size_t min
= (size
<= sleft
? size
: sleft
)
;
566 // WaW will give us the current locked entry.
567 pair
= this->write_lock (src
, WaW
)
;
569 // *jedi mind wave*...these aren't the droids you're looking for.
/* Clear PTR's bits from the write mask so commit ignores them.  */
570 *pair
.mask
&= ~((((gtm_cacheline_mask
)1 << min
) - 1) << sofs
)
;
/* Flush the buffered bytes out to real memory now.  */
572 memcpy (dst
, &pair
.line
->b
[sofs
], min
)
;
578 // (b) Copy subsequent cacheline sized chunks.
579 while (size
>= CACHELINE_SIZE
)
581 pair
= this->write_lock(src
, WaW
)
;
583 memcpy (dst
, pair
.line
, CACHELINE_SIZE
)
;
584 dst
+= CACHELINE_SIZE
;
586 size
-= CACHELINE_SIZE
;
589 // (c) Copy anything left over.
592 pair
= this->write_lock(src
, WaW
)
;
/* Trailing partial line: drop the low SIZE mask bits and flush.  */
593 *pair
.mask
&= ~(((gtm_cacheline_mask
)1 << size
) - 1);
594 memcpy (dst
, pair
.line
, size
)
;
597 // No need to drop locks, since we're going to abort the transaction
/* Constructor: allocate the initial read and write sets (RW_SET_SIZE
   entries each) and sample the clock for the starting snapshot.
   NOTE(review): fragmentary -- the m_cache_page/m_n_cache_page
   initialization is not visible in this extraction.  */
604 wbetl_dispatch::wbetl_dispatch ()
605 : abi_dispatch (false, false)
607 m_rset_entries
= (r_entry
*) xmalloc (RW_SET_SIZE
* sizeof(r_entry
))
;
608 m_rset_nb_entries
= 0;
609 m_rset_size
= RW_SET_SIZE
;
611 m_wset_entries
= (w_entry
*) xmalloc (RW_SET_SIZE
* sizeof(w_entry
))
;
612 m_wset_nb_entries
= 0;
613 m_wset_size
= RW_SET_SIZE
;
614 m_wset_reallocate
= false;
/* Empty snapshot range beginning at "now".  */
616 m_start
= m_end
= gtm_get_clock ();
625 GTM::dispatch_wbetl ()
627 return new wbetl_dispatch ();