]>
Commit | Line | Data |
---|---|---|
0a35513e AH |
1 | /* Copyright (C) 2009, 2011 Free Software Foundation, Inc. |
2 | Contributed by Richard Henderson <rth@redhat.com>. | |
3 | ||
4 | This file is part of the GNU Transactional Memory Library (libitm). | |
5 | ||
6 | Libitm is free software; you can redistribute it and/or modify it | |
7 | under the terms of the GNU General Public License as published by | |
8 | the Free Software Foundation; either version 3 of the License, or | |
9 | (at your option) any later version. | |
10 | ||
11 | Libitm is distributed in the hope that it will be useful, but WITHOUT ANY | |
12 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS | |
13 | FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
14 | more details. | |
15 | ||
16 | Under Section 7 of GPL version 3, you are granted additional | |
17 | permissions described in the GCC Runtime Library Exception, version | |
18 | 3.1, as published by the Free Software Foundation. | |
19 | ||
20 | You should have received a copy of the GNU General Public License and | |
21 | a copy of the GCC Runtime Library Exception along with this program; | |
22 | see the files COPYING3 and COPYING.RUNTIME respectively. If not, see | |
23 | <http://www.gnu.org/licenses/>. */ | |
24 | ||
25 | #ifndef LIBITM_ALPHA_CACHELINE_H | |
26 | #define LIBITM_ALPHA_CACHELINE_H 1 | |
27 | ||
28 | // A cacheline is the smallest unit with which locks are associated. | |
29 | // The current implementation of the _ITM_[RW] barriers assumes that | |
// all data types can fit (aligned) within a cacheline, which means
31 | // in practice sizeof(complex long double) is the smallest cacheline size. | |
32 | // It ought to be small enough for efficient manipulation of the | |
33 | // modification mask, below. | |
34 | #define CACHELINE_SIZE 64 | |
35 | ||
36 | #ifdef __alpha_bwx__ | |
37 | # include "config/generic/cacheline.h" | |
38 | #else | |
39 | // If we don't have byte-word stores, then we'll never be able to | |
// adjust *all* of the byte loads/stores to be truly atomic.  So
// only guarantee 4-byte aligned values atomically stored, exactly
42 | // like the native system. Use byte zap instructions to accelerate | |
43 | // sub-word masked stores. | |
44 | ||
namespace GTM HIDDEN {

// A gtm_cacheline_mask stores a modified bit for every modified byte
// in the cacheline with which it is associated.  With CACHELINE_SIZE
// of 64 this selects an integral type CACHELINE_SIZE / 8 == 8 bytes
// wide — presumably a 64-bit unsigned integer, one bit per cacheline
// byte (sized_integral is declared elsewhere; confirm its unit).
typedef sized_integral<CACHELINE_SIZE / 8>::type gtm_cacheline_mask;
50 | ||
// One cacheline's worth of data, viewable at several access widths.
// The union guarantees that all the array views alias the same
// CACHELINE_SIZE bytes, aligned to a cacheline boundary.
union gtm_cacheline
{
  // Byte access to the cacheline.
  unsigned char b[CACHELINE_SIZE] __attribute__((aligned(CACHELINE_SIZE)));

  // Larger sized access to the cacheline.
  uint16_t u16[CACHELINE_SIZE / sizeof(uint16_t)];
  uint32_t u32[CACHELINE_SIZE / sizeof(uint32_t)];
  uint64_t u64[CACHELINE_SIZE / sizeof(uint64_t)];
  gtm_word w[CACHELINE_SIZE / sizeof(gtm_word)];

  // Store S into D, but only the bytes specified by M.  Bit I of M
  // selects byte I of the word (Alpha zap/zapnot mask convention);
  // bytes whose mask bit is clear are left untouched in *D.
  static void store_mask(uint32_t *d, uint32_t s, uint8_t m);
  static void store_mask(uint64_t *d, uint64_t s, uint8_t m);

  // Copy S to D, but only the bytes specified by M.
  static void copy_mask (gtm_cacheline * __restrict d,
			 const gtm_cacheline * __restrict s,
			 gtm_cacheline_mask m);

  // A write barrier to emit after (a series of) copy_mask.
  static void copy_mask_wb () { atomic_write_barrier(); }
};
74 | ||
75 | inline void ALWAYS_INLINE | |
76 | gtm_cacheline::store_mask (uint32_t *d, uint32_t s, uint8_t m) | |
77 | { | |
78 | const uint8_t tm = (1 << sizeof(uint32_t)) - 1; | |
79 | ||
80 | m &= tm; | |
81 | if (__builtin_expect (m, tm)) | |
82 | { | |
83 | if (__builtin_expect (m == tm, 1)) | |
84 | *d = s; | |
85 | else | |
86 | *d = __builtin_alpha_zap (*d, m) | __builtin_alpha_zapnot (s, m); | |
87 | } | |
88 | } | |
89 | ||
90 | inline void ALWAYS_INLINE | |
91 | gtm_cacheline::store_mask (uint64_t *d, uint64_t s, uint8_t m) | |
92 | { | |
93 | if (__builtin_expect (m, 0xff)) | |
94 | { | |
95 | if (__builtin_expect (m == 0xff, 1)) | |
96 | *d = s; | |
97 | else | |
98 | { | |
99 | typedef uint32_t *p32 __attribute__((may_alias)); | |
100 | p32 d32 = reinterpret_cast<p32>(d); | |
101 | ||
102 | if ((m & 0x0f) == 0x0f) | |
103 | { | |
104 | d32[0] = s; | |
105 | m &= 0xf0; | |
106 | } | |
107 | else if ((m & 0xf0) == 0xf0) | |
108 | { | |
109 | d32[1] = s >> 32; | |
110 | m &= 0x0f; | |
111 | } | |
112 | ||
113 | if (m) | |
114 | *d = __builtin_alpha_zap (*d, m) | __builtin_alpha_zapnot (s, m); | |
115 | } | |
116 | } | |
117 | } | |
118 | ||
119 | } // namespace GTM | |
120 | ||
121 | #endif // __alpha_bwx__ | |
122 | #endif // LIBITM_ALPHA_CACHELINE_H |