]>
Commit | Line | Data |
---|---|---|
b667dd70 | 1 | //===-- sanitizer_fuchsia.cpp ---------------------------------------------===// |
5d3805fc | 2 | // |
b667dd70 ML |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. | |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | |
5d3805fc | 6 | // |
eac97531 | 7 | //===----------------------------------------------------------------------===// |
5d3805fc JJ |
8 | // |
9 | // This file is shared between AddressSanitizer and other sanitizer | |
10 | // run-time libraries and implements Fuchsia-specific functions from | |
11 | // sanitizer_common.h. | |
eac97531 | 12 | //===----------------------------------------------------------------------===// |
5d3805fc JJ |
13 | |
14 | #include "sanitizer_fuchsia.h" | |
15 | #if SANITIZER_FUCHSIA | |
16 | ||
17 | #include "sanitizer_common.h" | |
18 | #include "sanitizer_libc.h" | |
19 | #include "sanitizer_mutex.h" | |
5d3805fc JJ |
20 | |
21 | #include <limits.h> | |
22 | #include <pthread.h> | |
23 | #include <stdlib.h> | |
24 | #include <unistd.h> | |
5d3805fc JJ |
25 | #include <zircon/errors.h> |
26 | #include <zircon/process.h> | |
27 | #include <zircon/syscalls.h> | |
28 | ||
29 | namespace __sanitizer { | |
30 | ||
// Process exit: Zircon exposes this as a single syscall; no libc cleanup runs.
void NORETURN internal__exit(int exitcode) { _zx_process_exit(exitcode); }

uptr internal_sched_yield() {
  // A zero-deadline nanosleep is the Zircon idiom for yielding the CPU.
  zx_status_t status = _zx_nanosleep(0);
  CHECK_EQ(status, ZX_OK);
  return 0;  // Why doesn't this return void?
}

// Sleeps for |ns| nanoseconds relative to now; CHECK-fails on syscall error.
static void internal_nanosleep(zx_time_t ns) {
  zx_status_t status = _zx_nanosleep(_zx_deadline_after(ns));
  CHECK_EQ(status, ZX_OK);
}

// Mimics POSIX sleep(3)'s return contract ("seconds remaining"); the sleep
// cannot be interrupted early here, so the result is always 0.
unsigned int internal_sleep(unsigned int seconds) {
  internal_nanosleep(ZX_SEC(seconds));
  return 0;
}
48 | ||
// Wall-clock (UTC) time in nanoseconds.
// NOTE(review): _zx_clock_get with a status + out-parameter is an older form
// of the Zircon clock API that has since been revised — confirm against the
// target SDK revision before touching this.
u64 NanoTime() {
  zx_time_t time;
  zx_status_t status = _zx_clock_get(ZX_CLOCK_UTC, &time);
  CHECK_EQ(status, ZX_OK);
  return time;
}

// Monotonic time in nanoseconds; this syscall cannot fail.
u64 MonotonicNanoTime() { return _zx_clock_get_monotonic(); }
5d3805fc JJ |
57 | |
58 | uptr internal_getpid() { | |
59 | zx_info_handle_basic_t info; | |
60 | zx_status_t status = | |
61 | _zx_object_get_info(_zx_process_self(), ZX_INFO_HANDLE_BASIC, &info, | |
62 | sizeof(info), NULL, NULL); | |
63 | CHECK_EQ(status, ZX_OK); | |
64 | uptr pid = static_cast<uptr>(info.koid); | |
65 | CHECK_EQ(pid, info.koid); | |
66 | return pid; | |
67 | } | |
68 | ||
// Thread identity: the C11 thread handle reinterpreted as an integer.
// NOTE(review): thrd_current() comes from <threads.h>, which is not among the
// includes visible here — presumably pulled in transitively; verify.
uptr GetThreadSelf() { return reinterpret_cast<uptr>(thrd_current()); }

tid_t GetTid() { return GetThreadSelf(); }
5d3805fc JJ |
72 | |
// Hard abort via libc; used for fatal sanitizer errors.
void Abort() { abort(); }

// Registers an exit handler; returns 0 on success (plain atexit contract).
int Atexit(void (*function)(void)) { return atexit(function); }

void SleepForSeconds(int seconds) { internal_sleep(seconds); }

void SleepForMillis(int millis) { internal_nanosleep(ZX_MSEC(millis)); }
80 | ||
81 | void GetThreadStackTopAndBottom(bool, uptr *stack_top, uptr *stack_bottom) { | |
82 | pthread_attr_t attr; | |
83 | CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0); | |
84 | void *base; | |
85 | size_t size; | |
86 | CHECK_EQ(pthread_attr_getstack(&attr, &base, &size), 0); | |
87 | CHECK_EQ(pthread_attr_destroy(&attr), 0); | |
88 | ||
89 | *stack_bottom = reinterpret_cast<uptr>(base); | |
90 | *stack_top = *stack_bottom + size; | |
91 | } | |
92 | ||
// The hooks below are no-ops on Fuchsia: there is no exec-based re-launch,
// ASLR knob, sandbox preparation step, core-dumper toggle, or POSIX signal
// machinery for them to manipulate here.
void InitializePlatformEarly() {}
void MaybeReexec() {}
void CheckASLR() {}
void CheckMPROTECT() {}
void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {}
void DisableCoreDumperIfNecessary() {}
void InstallDeadlySignalHandlers(SignalHandlerType handler) {}
void SetAlternateSignalStack() {}
void UnsetAlternateSignalStack() {}
void InitTlsSize() {}

void PrintModuleMap() {}

// Fault reporting does not go through signals on this platform; these
// SignalContext queries are trivially false or not implemented.
bool SignalContext::IsStackOverflow() const { return false; }
void SignalContext::DumpAllRegisters(void *context) { UNIMPLEMENTED(); }
const char *SignalContext::Describe() const { UNIMPLEMENTED(); }
109 | ||
5d3805fc JJ |
// Futex word states for BlockingMutex.  MtxSleeping (vs. plain MtxLocked)
// records that a waiter may be blocked, telling Unlock it must futex-wake.
enum MutexState : int { MtxUnlocked = 0, MtxLocked = 1, MtxSleeping = 2 };

BlockingMutex::BlockingMutex() {
  // NOTE! It's important that this use internal_memset, because plain
  // memset might be intercepted (e.g., actually be __asan_memset).
  // Defining this so the compiler initializes each field, e.g.:
  // BlockingMutex::BlockingMutex() : BlockingMutex(LINKER_INITIALIZED) {}
  // might result in the compiler generating a call to memset, which would
  // have the same problem.
  internal_memset(this, 0, sizeof(*this));
}
121 | ||
void BlockingMutex::Lock() {
  CHECK_EQ(owner_, 0);
  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
  // Fast path: uncontended acquire.
  if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)
    return;
  // Contended path: publish MtxSleeping (so the eventual unlocker knows it
  // must wake someone) and block on the futex until the word reads unlocked.
  while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked) {
    zx_status_t status =
        _zx_futex_wait(reinterpret_cast<zx_futex_t *>(m), MtxSleeping,
                       ZX_HANDLE_INVALID, ZX_TIME_INFINITE);
    // ZX_ERR_BAD_STATE means the futex word changed before we went to sleep;
    // that is an expected race — just loop and retry.
    if (status != ZX_ERR_BAD_STATE)  // Normal race.
      CHECK_EQ(status, ZX_OK);
  }
}
135 | ||
void BlockingMutex::Unlock() {
  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
  // Release the lock and learn the prior state in one atomic step.
  u32 v = atomic_exchange(m, MtxUnlocked, memory_order_release);
  CHECK_NE(v, MtxUnlocked);  // Unlocking an unlocked mutex is a bug.
  if (v == MtxSleeping) {
    // Someone may be blocked in Lock(); wake exactly one waiter.
    zx_status_t status = _zx_futex_wake(reinterpret_cast<zx_futex_t *>(m), 1);
    CHECK_EQ(status, ZX_OK);
  }
}
145 | ||
146 | void BlockingMutex::CheckLocked() { | |
147 | atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_); | |
148 | CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed)); | |
149 | } | |
150 | ||
uptr GetPageSize() { return PAGE_SIZE; }

// Mappings are made with page granularity on Zircon.
uptr GetMmapGranularity() { return PAGE_SIZE; }

// Shadow layout published by the Fuchsia runtime; refreshed on every call to
// GetMaxUserVirtualAddress().
sanitizer_shadow_bounds_t ShadowBounds;

uptr GetMaxUserVirtualAddress() {
  ShadowBounds = __sanitizer_shadow_bounds();
  return ShadowBounds.memory_limit - 1;
}

// Sanitizers only deal with user address space on Fuchsia.
uptr GetMaxVirtualAddress() { return GetMaxUserVirtualAddress(); }
163 | ||
5d3805fc JJ |
// Creates an anonymous VMO of |size| bytes (rounded up to whole pages),
// names it |mem_type| for diagnostics, and maps it read/write into the root
// VMAR.  On failure: dies, unless the error is ZX_ERR_NO_MEMORY and
// |die_for_nomem| is false, in which case nullptr is returned.
static void *DoAnonymousMmapOrDie(uptr size, const char *mem_type,
                                  bool raw_report, bool die_for_nomem) {
  size = RoundUpTo(size, PAGE_SIZE);

  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmo_create", status,
                              raw_report);
    return nullptr;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
                          internal_strlen(mem_type));

  // TODO(mcgrathr): Maybe allocate a VMAR for all sanitizer heap and use that?
  uintptr_t addr;
  status =
      _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 0,
                   vmo, 0, size, &addr);
  // The mapping (if any) holds its own reference to the VMO; the creation
  // handle is no longer needed on either path.
  _zx_handle_close(vmo);

  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmar_map", status,
                              raw_report);
    return nullptr;
  }

  IncreaseTotalMmap(size);

  return reinterpret_cast<void *>(addr);
}
197 | ||
void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
  return DoAnonymousMmapOrDie(size, mem_type, raw_report, true);
}

// Zircon anonymous mappings are committed lazily anyway, so "no reserve" is
// the same as a plain mapping here.
void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
  return MmapOrDie(size, mem_type);
}

// Returns nullptr instead of dying on out-of-memory; any other mapping
// failure is still fatal.
void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
  return DoAnonymousMmapOrDie(size, mem_type, false, false);
}
209 | ||
// Reserves |init_size| bytes of address space by allocating a child VMAR;
// nothing is mapped yet — Map()/MapOrDie() later place pages inside it.
// NOTE(review): |fixed_addr| is accepted for interface parity with other
// platforms but is never used here — the kernel picks the base; confirm no
// Fuchsia caller depends on a specific address.
uptr ReservedAddressRange::Init(uptr init_size, const char *name,
                                uptr fixed_addr) {
  init_size = RoundUpTo(init_size, PAGE_SIZE);
  DCHECK_EQ(os_handle_, ZX_HANDLE_INVALID);
  uintptr_t base;
  zx_handle_t vmar;
  zx_status_t status =
      _zx_vmar_allocate(
          _zx_vmar_root_self(),
          ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE | ZX_VM_CAN_MAP_SPECIFIC,
          0, init_size, &vmar, &base);
  if (status != ZX_OK)
    ReportMmapFailureAndDie(init_size, name, "zx_vmar_allocate", status);
  base_ = reinterpret_cast<void *>(base);
  size_ = init_size;
  name_ = name;
  os_handle_ = vmar;

  return reinterpret_cast<uptr>(base_);
}
230 | ||
eac97531 ML |
231 | static uptr DoMmapFixedOrDie(zx_handle_t vmar, uptr fixed_addr, uptr map_size, |
232 | void *base, const char *name, bool die_for_nomem) { | |
233 | uptr offset = fixed_addr - reinterpret_cast<uptr>(base); | |
234 | map_size = RoundUpTo(map_size, PAGE_SIZE); | |
5d3805fc | 235 | zx_handle_t vmo; |
eac97531 | 236 | zx_status_t status = _zx_vmo_create(map_size, 0, &vmo); |
5d3805fc JJ |
237 | if (status != ZX_OK) { |
238 | if (status != ZX_ERR_NO_MEMORY || die_for_nomem) | |
eac97531 ML |
239 | ReportMmapFailureAndDie(map_size, name, "zx_vmo_create", status); |
240 | return 0; | |
5d3805fc | 241 | } |
eac97531 ML |
242 | _zx_object_set_property(vmo, ZX_PROP_NAME, name, internal_strlen(name)); |
243 | DCHECK_GE(base + size_, map_size + offset); | |
5d3805fc | 244 | uintptr_t addr; |
eac97531 ML |
245 | |
246 | status = | |
247 | _zx_vmar_map(vmar, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC, | |
248 | offset, vmo, 0, map_size, &addr); | |
5d3805fc JJ |
249 | _zx_handle_close(vmo); |
250 | if (status != ZX_OK) { | |
eac97531 ML |
251 | if (status != ZX_ERR_NO_MEMORY || die_for_nomem) { |
252 | ReportMmapFailureAndDie(map_size, name, "zx_vmar_map", status); | |
253 | } | |
254 | return 0; | |
5d3805fc | 255 | } |
eac97531 ML |
256 | IncreaseTotalMmap(map_size); |
257 | return addr; | |
258 | } | |
5d3805fc | 259 | |
// Maps within the reserved range; returns 0 on out-of-memory.
uptr ReservedAddressRange::Map(uptr fixed_addr, uptr map_size,
                               const char *name) {
  return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_,
                          name_, false);
}

// As Map(), but out-of-memory is fatal too.
// NOTE(review): both overloads forward the range's stored name_ and ignore
// the |name| argument — presumably intentional, but worth confirming against
// the other platform implementations.
uptr ReservedAddressRange::MapOrDie(uptr fixed_addr, uptr map_size,
                                    const char *name) {
  return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_,
                          name_, true);
}
271 | ||
// Unmaps [addr, addr+size) (page-rounded) from |target_vmar|; any unmap
// failure is fatal.  A null address or zero size is a silent no-op.
void UnmapOrDieVmar(void *addr, uptr size, zx_handle_t target_vmar) {
  if (!addr || !size) return;
  size = RoundUpTo(size, PAGE_SIZE);

  zx_status_t status =
      _zx_vmar_unmap(target_vmar, reinterpret_cast<uintptr_t>(addr), size);
  if (status != ZX_OK) {
    Report("ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p\n",
           SanitizerToolName, size, size, addr);
    CHECK("unable to unmap" && 0);
  }

  DecreaseTotalMmap(size);
}
286 | ||
// Releases part or all of the reserved range.  Only three shapes are
// accepted: the whole range (destroys the vmar), a prefix starting at base_,
// or a suffix ending at base_ + size_; anything else trips the CHECK.
void ReservedAddressRange::Unmap(uptr addr, uptr size) {
  CHECK_LE(size, size_);
  const zx_handle_t vmar = static_cast<zx_handle_t>(os_handle_);
  if (addr == reinterpret_cast<uptr>(base_)) {
    if (size == size_) {
      // Destroying the vmar effectively unmaps the whole mapping.
      _zx_vmar_destroy(vmar);
      _zx_handle_close(vmar);
      os_handle_ = static_cast<uptr>(ZX_HANDLE_INVALID);
      DecreaseTotalMmap(size);
      return;
    }
  } else {
    // Not a prefix: must be a suffix that ends exactly at the range's end.
    CHECK_EQ(addr + size, reinterpret_cast<uptr>(base_) + size_);
  }
  // Partial unmapping does not affect the fact that the initial range is still
  // reserved, and the resulting unmapped memory can't be reused.
  UnmapOrDieVmar(reinterpret_cast<void *>(addr), size, vmar);
}
306 | ||
// This should never be called.
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
  UNIMPLEMENTED();
}
311 | ||
// Allocates |size| bytes aligned to |alignment| (both powers of two, size at
// least a page).  Strategy: map size+alignment bytes so an aligned sub-range
// certainly exists, re-map the VMO's first |size| bytes onto the aligned
// address with SPECIFIC_OVERWRITE, then unmap the slop before and after.
// Returns nullptr on out-of-memory; any other failure is fatal.
void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
                                   const char *mem_type) {
  CHECK_GE(size, PAGE_SIZE);
  CHECK(IsPowerOfTwo(size));
  CHECK(IsPowerOfTwo(alignment));

  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmo_create", status, false);
    return nullptr;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
                          internal_strlen(mem_type));

  // TODO(mcgrathr): Maybe allocate a VMAR for all sanitizer heap and use that?

  // Map a larger size to get a chunk of address space big enough that
  // it surely contains an aligned region of the requested size.  Then
  // overwrite the aligned middle portion with a mapping from the
  // beginning of the VMO, and unmap the excess before and after.
  size_t map_size = size + alignment;
  uintptr_t addr;
  status =
      _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 0,
                   vmo, 0, map_size, &addr);
  if (status == ZX_OK) {
    uintptr_t map_addr = addr;
    uintptr_t map_end = map_addr + map_size;
    addr = RoundUpTo(map_addr, alignment);
    uintptr_t end = addr + size;
    if (addr != map_addr) {
      // The start was not already aligned: re-map the VMO's first |size|
      // bytes at the aligned address.  vmar offsets are relative to the
      // vmar's own base, hence the get_info call for info.base.
      zx_info_vmar_t info;
      status = _zx_object_get_info(_zx_vmar_root_self(), ZX_INFO_VMAR, &info,
                                   sizeof(info), NULL, NULL);
      if (status == ZX_OK) {
        uintptr_t new_addr;
        status = _zx_vmar_map(
            _zx_vmar_root_self(),
            ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC_OVERWRITE,
            addr - info.base, vmo, 0, size, &new_addr);
        if (status == ZX_OK) CHECK_EQ(new_addr, addr);
      }
    }
    // Trim the unused slop on either side of the aligned region.
    if (status == ZX_OK && addr != map_addr)
      status = _zx_vmar_unmap(_zx_vmar_root_self(), map_addr, addr - map_addr);
    if (status == ZX_OK && end != map_end)
      status = _zx_vmar_unmap(_zx_vmar_root_self(), end, map_end - end);
  }
  _zx_handle_close(vmo);

  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmar_map", status, false);
    return nullptr;
  }

  // Only |size| bytes remain mapped after the slop is trimmed.
  IncreaseTotalMmap(size);

  return reinterpret_cast<void *>(addr);
}
374 | ||
void UnmapOrDie(void *addr, uptr size) {
  UnmapOrDieVmar(addr, size, _zx_vmar_root_self());
}

// This is used on the shadow mapping, which cannot be changed.
// Zircon doesn't have anything like MADV_DONTNEED.
void ReleaseMemoryPagesToOS(uptr beg, uptr end) {}
382 | ||
void DumpProcessMap() {
  // TODO(mcgrathr): write it
  return;
}

// Probes readability of [beg, beg+size) by asking the kernel to copy the
// range into a scratch VMO: _zx_vmo_write returns an error (instead of
// faulting the process) when the source memory is inaccessible.
bool IsAccessibleMemoryRange(uptr beg, uptr size) {
  // TODO(mcgrathr): Figure out a better way.
  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status == ZX_OK) {
    status = _zx_vmo_write(vmo, reinterpret_cast<const void *>(beg), 0, size);
    _zx_handle_close(vmo);
  }
  return status == ZX_OK;
}
398 | ||
// FIXME implement on this platform.
void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) {}
401 | ||
// Fuchsia's stand-in for reading a file: fetches the named configuration VMO
// from the runtime and maps up to |max_len| bytes of it read-only.  On
// success, *buff points at the mapping, *buff_size is its page-rounded
// length, and *read_len is min(vmo size, max_len).  On failure returns false
// and, when |errno_p| is non-null, stores the zx_status_t there.
bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
                      uptr *read_len, uptr max_len, error_t *errno_p) {
  zx_handle_t vmo;
  zx_status_t status = __sanitizer_get_configuration(file_name, &vmo);
  if (status == ZX_OK) {
    uint64_t vmo_size;
    status = _zx_vmo_get_size(vmo, &vmo_size);
    if (status == ZX_OK) {
      if (vmo_size < max_len) max_len = vmo_size;
      size_t map_size = RoundUpTo(max_len, PAGE_SIZE);
      uintptr_t addr;
      status = _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ, 0, vmo, 0,
                            map_size, &addr);
      if (status == ZX_OK) {
        *buff = reinterpret_cast<char *>(addr);
        *buff_size = map_size;
        *read_len = max_len;
      }
    }
    // The mapping (if any) keeps the pages alive; the handle itself is no
    // longer needed on any path.
    _zx_handle_close(vmo);
  }
  if (status != ZX_OK && errno_p) *errno_p = status;
  return status == ZX_OK;
}
426 | ||
// Buffers output in a per-thread line buffer and forwards complete lines to
// __sanitizer_log_write, keeping concurrent threads' log output
// line-coherent.  Text longer than the buffer with no '\n' is flushed in
// |size|-byte chunks.
void RawWrite(const char *buffer) {
  constexpr size_t size = 128;
  static _Thread_local char line[size];
  static _Thread_local size_t lastLineEnd = 0;  // one past the last buffered '\n'
  static _Thread_local size_t cur = 0;          // bytes currently buffered

  while (*buffer) {
    if (cur >= size) {
      // Buffer full: flush up to the last complete line (or the whole buffer
      // if no newline was seen) and slide the remainder to the front.
      if (lastLineEnd == 0)
        lastLineEnd = size;
      __sanitizer_log_write(line, lastLineEnd);
      internal_memmove(line, line + lastLineEnd, cur - lastLineEnd);
      cur = cur - lastLineEnd;
      lastLineEnd = 0;
    }
    if (*buffer == '\n')
      lastLineEnd = cur + 1;
    line[cur++] = *buffer++;
  }
  // Flush all complete lines before returning.
  if (lastLineEnd != 0) {
    __sanitizer_log_write(line, lastLineEnd);
    internal_memmove(line, line + lastLineEnd, cur - lastLineEnd);
    cur = cur - lastLineEnd;
    lastLineEnd = 0;
  }
}
454 | ||
// Last-resort output path: bypasses RawWrite's line buffering entirely so a
// crashing thread cannot lose its final message.
void CatastrophicErrorWrite(const char *buffer, uptr length) {
  __sanitizer_log_write(buffer, length);
}

// Captured by __sanitizer_startup_hook before main().
char **StoredArgv;
char **StoredEnviron;

char **GetArgv() { return StoredArgv; }
char **GetEnviron() { return StoredEnviron; }
5d3805fc JJ |
464 | |
465 | const char *GetEnv(const char *name) { | |
466 | if (StoredEnviron) { | |
467 | uptr NameLen = internal_strlen(name); | |
468 | for (char **Env = StoredEnviron; *Env != 0; Env++) { | |
469 | if (internal_strncmp(*Env, name, NameLen) == 0 && (*Env)[NameLen] == '=') | |
470 | return (*Env) + NameLen + 1; | |
471 | } | |
472 | } | |
473 | return nullptr; | |
474 | } | |
475 | ||
476 | uptr ReadBinaryName(/*out*/ char *buf, uptr buf_len) { | |
eac97531 ML |
477 | const char *argv0 = "<UNKNOWN>"; |
478 | if (StoredArgv && StoredArgv[0]) { | |
479 | argv0 = StoredArgv[0]; | |
480 | } | |
5d3805fc JJ |
481 | internal_strncpy(buf, argv0, buf_len); |
482 | return internal_strlen(buf); | |
483 | } | |
484 | ||
485 | uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len) { | |
486 | return ReadBinaryName(buf, buf_len); | |
487 | } | |
488 | ||
// Main-thread stack bounds recorded by __sanitizer_startup_hook.
uptr MainThreadStackBase, MainThreadStackSize;

// Fills |buffer| with kernel entropy.  The |blocking| flag is ignored:
// _zx_cprng_draw has no failure path surfaced here for lengths up to
// ZX_CPRNG_DRAW_MAX_LEN (enforced by the CHECK), so this always succeeds.
bool GetRandom(void *buffer, uptr length, bool blocking) {
  CHECK_LE(length, ZX_CPRNG_DRAW_MAX_LEN);
  _zx_cprng_draw(buffer, length);
  return true;
}
496 | ||
eac97531 ML |
497 | u32 GetNumberOfCPUs() { |
498 | return zx_system_get_num_cpus(); | |
499 | } | |
500 | ||
501 | uptr GetRSS() { UNIMPLEMENTED(); } | |
502 | ||
5d3805fc JJ |
503 | } // namespace __sanitizer |
504 | ||
3ca75cd5 | 505 | using namespace __sanitizer; |
5d3805fc JJ |
506 | |
extern "C" {
// Invoked by the Fuchsia runtime at process startup; captures the process's
// arguments, environment, and main-thread stack bounds for the accessors in
// __sanitizer above.
void __sanitizer_startup_hook(int argc, char **argv, char **envp,
                              void *stack_base, size_t stack_size) {
  __sanitizer::StoredArgv = argv;
  __sanitizer::StoredEnviron = envp;
  __sanitizer::MainThreadStackBase = reinterpret_cast<uintptr_t>(stack_base);
  __sanitizer::MainThreadStackSize = stack_size;
}

void __sanitizer_set_report_path(const char *path) {
  // Handle the initialization code in each sanitizer, but no other calls.
  // This setting is never consulted on Fuchsia.
  DCHECK_EQ(path, common_flags()->log_path);
}

void __sanitizer_set_report_fd(void *fd) {
  UNREACHABLE("not available on Fuchsia");
}
}  // extern "C"
526 | ||
527 | #endif // SANITIZER_FUCHSIA |