prelude::*,
seq_file::SeqFile,
seq_print,
+ sync::atomic::{ordering::Relaxed, Atomic},
sync::poll::PollTable,
sync::Arc,
task::Pid,
use crate::{context::Context, page_range::Shrinker, process::Process, thread::Thread};
-use core::{
- ptr::NonNull,
- sync::atomic::{AtomicBool, AtomicUsize, Ordering},
-};
+use core::ptr::NonNull;
mod allocation;
mod context;
}
fn next_debug_id() -> usize {
- static NEXT_DEBUG_ID: AtomicUsize = AtomicUsize::new(0);
- NEXT_DEBUG_ID.fetch_add(1, Ordering::Relaxed)
+ static NEXT_DEBUG_ID: Atomic<usize> = Atomic::new(0);
+ NEXT_DEBUG_ID.fetch_add(1, Relaxed)
}
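
For context on the pattern (here and in `next_err_id` further down): a relaxed fetch_add is all that is needed to hand out unique IDs, since callers only require distinct values, not any ordering against other memory. A standalone sketch of the same idiom, written against core/std atomics rather than the kernel `Atomic` type so it builds outside the kernel tree:

    use std::sync::atomic::{AtomicUsize, Ordering::Relaxed};
    use std::thread;

    // Hand out unique IDs: each caller only needs a distinct value, so a
    // relaxed read-modify-write is sufficient.
    fn next_debug_id() -> usize {
        static NEXT_DEBUG_ID: AtomicUsize = AtomicUsize::new(0);
        NEXT_DEBUG_ID.fetch_add(1, Relaxed)
    }

    fn main() {
        let handles: Vec<_> = (0..4).map(|_| thread::spawn(next_debug_id)).collect();
        let mut ids: Vec<usize> = handles.into_iter().map(|h| h.join().unwrap()).collect();
        ids.sort_unstable();
        ids.dedup();
        assert_eq!(ids.len(), 4); // all four IDs are distinct
    }
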
/// Provides a single place to write Binder return values via the
struct DeliverCode {
code: u32,
- skip: AtomicBool,
+ skip: Atomic<bool>,
}
kernel::list::impl_list_arc_safe! {
fn new(code: u32) -> Self {
Self {
code,
- skip: AtomicBool::new(false),
+ skip: Atomic::new(false),
}
}
/// This is used instead of removing it from the work list, since `LinkedList::remove` is
/// unsafe, whereas this method is not.
fn skip(&self) {
- self.skip.store(true, Ordering::Relaxed);
+ self.skip.store(true, Relaxed);
}
}
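
The doc comment on `skip` is the key design point: an entry is cancelled by flagging it rather than unlinking it, because unlinking would require the unsafe `LinkedList::remove` while a relaxed store does not. A rough standalone analogue of that pattern, using std atomics, `Arc`, and a `VecDeque` as stand-ins for the kernel types:

    use std::collections::VecDeque;
    use std::sync::atomic::{AtomicBool, Ordering::Relaxed};
    use std::sync::Arc;

    // A queued item that can be cancelled without being unlinked from the
    // queue: the producer sets `skip`, and the consumer ignores the entry.
    struct DeliverCode {
        code: u32,
        skip: AtomicBool,
    }

    impl DeliverCode {
        fn new(code: u32) -> Arc<Self> {
            Arc::new(Self { code, skip: AtomicBool::new(false) })
        }
        fn skip(&self) {
            self.skip.store(true, Relaxed);
        }
    }

    fn main() {
        let mut queue: VecDeque<Arc<DeliverCode>> = VecDeque::new();
        let a = DeliverCode::new(1);
        let b = DeliverCode::new(2);
        queue.push_back(a.clone());
        queue.push_back(b.clone());

        // Cancel `a` without touching the queue itself.
        a.skip();

        // The consumer drains everything and only delivers non-skipped codes.
        let delivered: Vec<u32> = queue
            .drain(..)
            .filter(|item| !item.skip.load(Relaxed))
            .map(|item| item.code)
            .collect();
        assert_eq!(delivered, [2]);
    }
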
_thread: &Thread,
writer: &mut BinderReturnWriter<'_>,
) -> Result<bool> {
- if !self.skip.load(Ordering::Relaxed) {
+ if !self.skip.load(Relaxed) {
writer.write_code(self.code)?;
}
Ok(true)
fn debug_print(&self, m: &SeqFile, prefix: &str, _tprefix: &str) -> Result<()> {
seq_print!(m, "{}", prefix);
- if self.skip.load(Ordering::Relaxed) {
+ if self.skip.load(Relaxed) {
seq_print!(m, "(skipped) ");
}
if self.code == defs::BR_TRANSACTION_COMPLETE {
//! Keep track of statistics for binder_logs.
use crate::defs::*;
-use core::sync::atomic::{AtomicU32, Ordering::Relaxed};
+use kernel::sync::atomic::{ordering::Relaxed, Atomic};
use kernel::{ioctl::_IOC_NR, seq_file::SeqFile, seq_print};
const BC_COUNT: usize = _IOC_NR(BC_REPLY_SG) as usize + 1;
pub(crate) static GLOBAL_STATS: BinderStats = BinderStats::new();
pub(crate) struct BinderStats {
- bc: [AtomicU32; BC_COUNT],
- br: [AtomicU32; BR_COUNT],
+ bc: [Atomic<u32>; BC_COUNT],
+ br: [Atomic<u32>; BR_COUNT],
}
impl BinderStats {
pub(crate) const fn new() -> Self {
#[expect(clippy::declare_interior_mutable_const)]
- const ZERO: AtomicU32 = AtomicU32::new(0);
+ const ZERO: Atomic<u32> = Atomic::new(0);
Self {
bc: [ZERO; BC_COUNT],
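
The `ZERO` helper const is what makes the `[ZERO; BC_COUNT]` array expression work in a `const fn`: atomics are not `Copy`, so the repeat-expression needs a named const, and clippy's `declare_interior_mutable_const` lint has to be expected because "every use of this const is a fresh copy" is normally a bug, while here a fresh zeroed atomic per slot is exactly the intent. A compilable sketch of the idiom with core atomics (struct, field, and count names are illustrative, not the driver's):

    use std::sync::atomic::{AtomicU32, Ordering::Relaxed};

    const COUNT: usize = 4;

    struct Stats {
        counters: [AtomicU32; COUNT],
    }

    impl Stats {
        const fn new() -> Self {
            // A const with interior mutability normally triggers
            // clippy::declare_interior_mutable_const; here the copy-per-use
            // semantics is wanted, since each array slot gets its own atomic.
            #[expect(clippy::declare_interior_mutable_const)]
            const ZERO: AtomicU32 = AtomicU32::new(0);
            Self { counters: [ZERO; COUNT] }
        }

        fn inc(&self, idx: usize) {
            self.counters[idx].fetch_add(1, Relaxed);
        }
    }

    static STATS: Stats = Stats::new();

    fn main() {
        STATS.inc(1);
        STATS.inc(1);
        assert_eq!(STATS.counters[1].load(Relaxed), 2);
    }
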
security,
seq_file::SeqFile,
seq_print,
+ sync::atomic::{ordering::Relaxed, Atomic},
sync::poll::{PollCondVar, PollTable},
sync::{Arc, SpinLock},
task::Task,
BinderReturnWriter, DArc, DLArc, DTRWrap, DeliverCode, DeliverToRead,
};
-use core::{
- mem::size_of,
- sync::atomic::{AtomicU32, Ordering},
-};
+use core::mem::size_of;
/// Stores the layout of the scatter-gather entries. This is used during the `translate_objects`
/// call and is discarded when it returns.
impl InnerThread {
fn new() -> Result<Self> {
fn next_err_id() -> u32 {
- static EE_ID: AtomicU32 = AtomicU32::new(0);
- EE_ID.fetch_add(1, Ordering::Relaxed)
+ static EE_ID: Atomic<u32> = Atomic::new(0);
+ EE_ID.fetch_add(1, Relaxed)
}
Ok(Self {
#[pin_data]
struct ThreadError {
- error_code: AtomicU32,
+ error_code: Atomic<u32>,
#[pin]
links_track: AtomicTracker,
}
impl ThreadError {
fn try_new() -> Result<DArc<Self>> {
DTRWrap::arc_pin_init(pin_init!(Self {
- error_code: AtomicU32::new(BR_OK),
+ error_code: Atomic::new(BR_OK),
links_track <- AtomicTracker::new(),
}))
.map(ListArc::into_arc)
}
fn set_error_code(&self, code: u32) {
- self.error_code.store(code, Ordering::Relaxed);
+ self.error_code.store(code, Relaxed);
}
fn is_unused(&self) -> bool {
- self.error_code.load(Ordering::Relaxed) == BR_OK
+ self.error_code.load(Relaxed) == BR_OK
}
}
_thread: &Thread,
writer: &mut BinderReturnWriter<'_>,
) -> Result<bool> {
- let code = self.error_code.load(Ordering::Relaxed);
- self.error_code.store(BR_OK, Ordering::Relaxed);
+ let code = self.error_code.load(Relaxed);
+ self.error_code.store(BR_OK, Relaxed);
writer.write_code(code)?;
Ok(true)
}
m,
"{}transaction error: {}\n",
prefix,
- self.error_code.load(Ordering::Relaxed)
+ self.error_code.load(Relaxed)
);
Ok(())
}
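
`ThreadError` is a small reusable slot: `BR_OK` doubles as the "no error pending" sentinel, `set_error_code` arms it, and `do_work` delivers the code and writes `BR_OK` back so the same allocation can be reused. A standalone sketch of that consume-and-reset pattern (the `take` helper and the constant values are illustrative, not the driver's API); the split load/store assumes a single consumer drains the slot at a time, as the relaxed accesses in the patch imply:

    use std::sync::atomic::{AtomicU32, Ordering::Relaxed};

    // Illustrative sentinel; the real BR_OK is an ioctl-style constant.
    const BR_OK: u32 = 0;

    // One pending error code per thread; BR_OK marks the slot as unused.
    struct ThreadError {
        error_code: AtomicU32,
    }

    impl ThreadError {
        fn new() -> Self {
            Self { error_code: AtomicU32::new(BR_OK) }
        }
        fn set_error_code(&self, code: u32) {
            self.error_code.store(code, Relaxed);
        }
        fn is_unused(&self) -> bool {
            self.error_code.load(Relaxed) == BR_OK
        }
        // Delivery consumes the code and re-arms the slot for reuse.
        fn take(&self) -> u32 {
            let code = self.error_code.load(Relaxed);
            self.error_code.store(BR_OK, Relaxed);
            code
        }
    }

    fn main() {
        let e = ThreadError::new();
        assert!(e.is_unused());
        e.set_error_code(0x7f04); // illustrative error value
        assert!(!e.is_unused());
        assert_eq!(e.take(), 0x7f04);
        assert!(e.is_unused()); // slot can be reused
    }
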
// Copyright (C) 2025 Google LLC.
-use core::sync::atomic::{AtomicBool, Ordering};
use kernel::{
prelude::*,
seq_file::SeqFile,
seq_print,
+ sync::atomic::{ordering::Relaxed, Atomic},
sync::{Arc, SpinLock},
task::Kuid,
time::{Instant, Monotonic},
pub(crate) to: Arc<Process>,
#[pin]
allocation: SpinLock<Option<Allocation>>,
- is_outstanding: AtomicBool,
+ is_outstanding: Atomic<bool>,
code: u32,
pub(crate) flags: u32,
data_size: usize,
offsets_size: trd.offsets_size as _,
data_address,
allocation <- kernel::new_spinlock!(Some(alloc.success()), "Transaction::new"),
- is_outstanding: AtomicBool::new(false),
+ is_outstanding: Atomic::new(false),
txn_security_ctx_off,
oneway_spam_detected,
start_time: Instant::now(),
offsets_size: trd.offsets_size as _,
data_address: alloc.ptr,
allocation <- kernel::new_spinlock!(Some(alloc.success()), "Transaction::new"),
- is_outstanding: AtomicBool::new(false),
+ is_outstanding: Atomic::new(false),
txn_security_ctx_off: None,
oneway_spam_detected,
start_time: Instant::now(),
pub(crate) fn set_outstanding(&self, to_process: &mut ProcessInner) {
// No race because this method is only called once.
- if !self.is_outstanding.load(Ordering::Relaxed) {
- self.is_outstanding.store(true, Ordering::Relaxed);
+ if !self.is_outstanding.load(Relaxed) {
+ self.is_outstanding.store(true, Relaxed);
to_process.add_outstanding_txn();
}
}
// destructor, which is guaranteed to not race with any other operations on the
// transaction. It also cannot race with `set_outstanding`, since submission happens
// before delivery.
- if self.is_outstanding.load(Ordering::Relaxed) {
- self.is_outstanding.store(false, Ordering::Relaxed);
+ if self.is_outstanding.load(Relaxed) {
+ self.is_outstanding.store(false, Relaxed);
self.to.drop_outstanding_txn();
}
}
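
The two safety comments above ("only called once"; clearing "cannot race with `set_outstanding`, since submission happens before delivery") are why plain relaxed loads and stores are enough for `is_outstanding`: the check-then-set never runs concurrently with itself or with the clearing path. A standalone sketch of the same bookkeeping with core atomics (the global counter and method names are stand-ins for `ProcessInner`'s accounting):

    use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering::Relaxed};

    // Process-wide count of outstanding transactions (stand-in for ProcessInner).
    static OUTSTANDING: AtomicUsize = AtomicUsize::new(0);

    struct Transaction {
        is_outstanding: AtomicBool,
    }

    impl Transaction {
        fn new() -> Self {
            Self { is_outstanding: AtomicBool::new(false) }
        }

        // Called once at submission; the flag records that the counter was bumped.
        fn set_outstanding(&self) {
            if !self.is_outstanding.load(Relaxed) {
                self.is_outstanding.store(true, Relaxed);
                OUTSTANDING.fetch_add(1, Relaxed);
            }
        }

        // Called at teardown; only drops the count if set_outstanding ran.
        // The load/store pair need not be atomic as a unit because submission
        // strictly precedes teardown for a given transaction.
        fn clear_outstanding(&self) {
            if self.is_outstanding.load(Relaxed) {
                self.is_outstanding.store(false, Relaxed);
                OUTSTANDING.fetch_sub(1, Relaxed);
            }
        }
    }

    fn main() {
        let t = Transaction::new();
        t.set_outstanding();
        t.set_outstanding(); // idempotent: the counter only goes up once
        assert_eq!(OUTSTANDING.load(Relaxed), 1);
        t.clear_outstanding();
        assert_eq!(OUTSTANDING.load(Relaxed), 0);
    }
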