// refcount.rs (forked from RustPython/RustPython)

use crate::atomic::{Ordering, PyAtomic, Radium};

// State layout (usize):
// [1 bit: destructed] [1 bit: reserved] [1 bit: leaked] [N bits: weak_count] [M bits: strong_count]
// 64-bit: N=30, M=31. 32-bit: N=14, M=15.
const FLAG_BITS: u32 = 3; // destructed + reserved + leaked
const DESTRUCTED: usize = 1 << (usize::BITS - 1); // object has been destructed
const LEAKED: usize = 1 << (usize::BITS - 3); // object is leaked (interned); bit BITS - 2 is reserved
const TOTAL_COUNT_WIDTH: u32 = usize::BITS - FLAG_BITS;
const WEAK_WIDTH: u32 = TOTAL_COUNT_WIDTH / 2;
const STRONG_WIDTH: u32 = TOTAL_COUNT_WIDTH - WEAK_WIDTH;
const STRONG: usize = (1 << STRONG_WIDTH) - 1; // mask selecting the strong-count bits
const COUNT: usize = 1; // one unit of strong count
const WEAK_COUNT: usize = 1 << STRONG_WIDTH; // one unit of weak count
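
// Illustrative addition (not in the upstream file): compile-time sanity
// checks that the layout arithmetic adds up, e.g. 3 + 30 + 31 == 64 on
// 64-bit targets, with the flag bits disjoint from both count fields.
const _: () = {
    assert!(FLAG_BITS + WEAK_WIDTH + STRONG_WIDTH == usize::BITS);
    assert!((LEAKED & STRONG) == 0 && (WEAK_COUNT & STRONG) == 0);
    assert!(DESTRUCTED > LEAKED && LEAKED > WEAK_COUNT);
};
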
#[inline(never)]
#[cold]
#[allow(
clippy::disallowed_methods,
reason = "refcount overflow must preserve upstream abort semantics"
)]
fn refcount_overflow() -> ! {
#[cfg(feature = "std")]
std::process::abort();
#[cfg(not(feature = "std"))]
core::panic!("refcount overflow");
}

/// `State` packs the strong and weak counts plus the flag bits into a single
/// word (platform `usize`).
#[derive(Clone, Copy)]
struct State {
inner: usize,
}

impl State {
#[inline]
fn from_raw(inner: usize) -> Self {
Self { inner }
}
#[inline]
fn as_raw(self) -> usize {
self.inner
}
#[inline]
fn strong(self) -> u32 {
((self.inner & STRONG) / COUNT) as u32
}
#[inline]
fn destructed(self) -> bool {
(self.inner & DESTRUCTED) != 0
}
#[inline]
fn leaked(self) -> bool {
(self.inner & LEAKED) != 0
}
#[inline]
fn add_strong(self, val: u32) -> Self {
Self::from_raw(self.inner + (val as usize) * COUNT)
}
#[inline]
fn with_leaked(self, leaked: bool) -> Self {
Self::from_raw((self.inner & !LEAKED) | if leaked { LEAKED } else { 0 })
}
}
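
// Illustrative addition: a small test showing how the `State` helpers keep
// the strong count and the leaked flag independent within one word.
#[cfg(test)]
mod state_tests {
    use super::*;

    #[test]
    fn strong_count_and_leaked_flag_are_independent() {
        let s = State::from_raw(COUNT + WEAK_COUNT); // strong=1, weak=1
        assert_eq!(s.strong(), 1);
        assert!(!s.destructed() && !s.leaked());

        let s = s.add_strong(2).with_leaked(true);
        assert_eq!(s.strong(), 3); // counts survive flag updates
        assert!(s.leaked());
        assert_eq!(s.with_leaked(false).strong(), 3);
    }
}
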
/// Reference count using state layout with LEAKED support.
///
/// State layout (usize):
/// 64-bit: [1 bit: destructed] [1 bit: reserved] [1 bit: leaked] [30 bits: weak_count] [31 bits: strong_count]
/// 32-bit: [1 bit: destructed] [1 bit: reserved] [1 bit: leaked] [14 bits: weak_count] [15 bits: strong_count]
pub struct RefCount {
state: PyAtomic<usize>,
}

impl Default for RefCount {
fn default() -> Self {
Self::new()
}
}

impl RefCount {
/// Create a new RefCount with strong count = 1
pub fn new() -> Self {
// Initial state: strong=1, weak=1 (implicit weak for strong refs)
Self {
state: Radium::new(COUNT + WEAK_COUNT),
}
}
/// Get current strong count
#[inline]
pub fn get(&self) -> usize {
State::from_raw(self.state.load(Ordering::Relaxed)).strong() as usize
}
/// Increment strong count
#[inline]
pub fn inc(&self) {
let val = State::from_raw(self.state.fetch_add(COUNT, Ordering::Relaxed));
if val.destructed() || (val.strong() as usize) > STRONG - 1 {
refcount_overflow();
}
        if val.strong() == 0 {
            // We incremented from 0, resurrecting an object whose destruction
            // has already begun; the fetch_add above grants the destruction
            // path permission to run its decrement again, so add one more
            // count to balance that extra decrement.
            self.state.fetch_add(COUNT, Ordering::Relaxed);
        }
}
#[inline]
pub fn inc_by(&self, n: usize) {
debug_assert!(n <= STRONG);
let val = State::from_raw(self.state.fetch_add(n * COUNT, Ordering::Relaxed));
if val.destructed() || (val.strong() as usize) > STRONG - n {
refcount_overflow();
}
}
    /// Try to increment the strong count without reviving a dead object.
    /// Returns `true` on success, or `false` if the object is already
    /// destructed or its strong count has reached zero.
#[inline]
#[must_use]
pub fn safe_inc(&self) -> bool {
let mut old = State::from_raw(self.state.load(Ordering::Relaxed));
loop {
if old.destructed() || old.strong() == 0 {
return false;
}
if (old.strong() as usize) >= STRONG {
refcount_overflow();
}
let new_state = old.add_strong(1);
match self.state.compare_exchange_weak(
old.as_raw(),
new_state.as_raw(),
Ordering::Relaxed,
Ordering::Relaxed,
) {
Ok(_) => return true,
Err(curr) => old = State::from_raw(curr),
}
}
}
/// Decrement strong count. Returns true when count drops to 0.
#[inline]
#[must_use]
pub fn dec(&self) -> bool {
let old = State::from_raw(self.state.fetch_sub(COUNT, Ordering::Release));
        // Leaked (interned) objects are never destructed, so dec never
        // reports a drop to 0 for them.
if old.leaked() {
return false;
}
if old.strong() == 1 {
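            // `Release` on the decrement plus this `Acquire` fence is the
            // classic `Arc` drop pattern: it guarantees that all writes to the
            // object made by other threads happen-before the destruction the
            // caller performs when we return true.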
core::sync::atomic::fence(Ordering::Acquire);
return true;
}
false
}
/// Mark this object as leaked (interned). It will never be deallocated.
pub fn leak(&self) {
debug_assert!(!self.is_leaked());
let mut old = State::from_raw(self.state.load(Ordering::Relaxed));
loop {
let new_state = old.with_leaked(true);
match self.state.compare_exchange_weak(
old.as_raw(),
new_state.as_raw(),
Ordering::AcqRel,
Ordering::Relaxed,
) {
Ok(_) => return,
Err(curr) => old = State::from_raw(curr),
}
}
}
/// Check if this object is leaked (interned).
pub fn is_leaked(&self) -> bool {
State::from_raw(self.state.load(Ordering::Acquire)).leaked()
}
}
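
// Illustrative addition: exercises the documented lifecycle. `new` starts at
// strong = 1, and a leaked (interned) object never reports dropping to zero.
#[cfg(test)]
mod refcount_tests {
    use super::*;

    #[test]
    fn inc_dec_roundtrip() {
        let rc = RefCount::new();
        assert_eq!(rc.get(), 1);
        rc.inc();
        assert_eq!(rc.get(), 2);
        assert!(!rc.dec()); // 2 -> 1: not the last reference
        assert!(rc.dec()); // 1 -> 0: caller should destruct
    }

    #[test]
    fn leaked_objects_never_report_zero() {
        let rc = RefCount::new();
        rc.leak();
        assert!(rc.is_leaked());
        assert!(!rc.dec()); // even the final decrement reports false
    }
}
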
// Deferred Drop Infrastructure
//
// This mechanism allows untrack_object() calls to be deferred until after
// the GC collection phase completes, preventing deadlocks that occur when
// clear (pop_edges) triggers object destruction while holding the tracked_objects lock.

#[cfg(feature = "std")]
use core::cell::{Cell, RefCell};
#[cfg(feature = "std")]
thread_local! {
/// Flag indicating if we're inside a deferred drop context.
/// When true, drop operations should defer untrack calls.
static IN_DEFERRED_CONTEXT: Cell<bool> = const { Cell::new(false) };
/// Queue of deferred untrack operations.
/// No Send bound needed - this is thread-local and only accessed from the same thread.
static DEFERRED_QUEUE: RefCell<Vec<Box<dyn FnOnce()>>> = const { RefCell::new(Vec::new()) };
}

#[cfg(feature = "std")]
struct DeferredDropGuard {
was_in_context: bool,
}

#[cfg(feature = "std")]
impl Drop for DeferredDropGuard {
fn drop(&mut self) {
IN_DEFERRED_CONTEXT.with(|in_ctx| {
in_ctx.set(self.was_in_context);
});
// Only flush if we're the outermost context and not already panicking
// (flushing during unwinding risks double-panic → process abort).
if !self.was_in_context && !std::thread::panicking() {
flush_deferred_drops();
}
}
}

/// Execute a function within a deferred drop context.
/// Any calls to `try_defer_drop` within this context are queued and executed
/// when the outermost context exits. Flushing is skipped while the thread is
/// panicking, to avoid a double panic.
#[cfg(feature = "std")]
#[inline]
pub fn with_deferred_drops<F, R>(f: F) -> R
where
F: FnOnce() -> R,
{
let _guard = IN_DEFERRED_CONTEXT.with(|in_ctx| {
let was_in_context = in_ctx.get();
in_ctx.set(true);
DeferredDropGuard { was_in_context }
});
f()
}

/// Try to defer a drop-related operation.
/// If inside a deferred context, the operation is queued.
/// Otherwise, it executes immediately.
#[cfg(feature = "std")]
#[inline]
pub fn try_defer_drop<F>(f: F)
where
F: FnOnce() + 'static,
{
let should_defer = IN_DEFERRED_CONTEXT.with(|in_ctx| in_ctx.get());
if should_defer {
DEFERRED_QUEUE.with(|q| {
q.borrow_mut().push(Box::new(f));
});
} else {
f();
}
}

/// Flush all deferred drop operations.
/// Called automatically when the outermost deferred context exits
/// (skipped while the thread is panicking).
#[cfg(feature = "std")]
#[inline]
pub fn flush_deferred_drops() {
DEFERRED_QUEUE.with(|q| {
// Take all queued operations
let ops: Vec<_> = q.borrow_mut().drain(..).collect();
// Execute them outside the borrow
for op in ops {
op();
}
});
}
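
// Illustrative addition: deferred operations queue inside the context and run
// in submission order when the outermost context exits; outside any context
// they execute immediately.
#[cfg(all(test, feature = "std"))]
mod deferred_drop_tests {
    use super::*;
    use std::cell::RefCell;
    use std::rc::Rc;

    #[test]
    fn drops_flush_when_context_exits() {
        let log = Rc::new(RefCell::new(Vec::new()));
        let (a, b) = (log.clone(), log.clone());
        with_deferred_drops(|| {
            try_defer_drop(move || a.borrow_mut().push("untrack"));
            log.borrow_mut().push("collect");
        });
        try_defer_drop(move || b.borrow_mut().push("immediate"));
        assert_eq!(*log.borrow(), ["collect", "untrack", "immediate"]);
    }
}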