
Commit 53451c2

move addr_from_alloc_id logic into its own function
1 parent b71297f commit 53451c2
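
The commit splits the body of `addr_from_alloc_id` into a thin caching wrapper plus a new `addr_from_alloc_id_uncached` helper that does the actual address selection. Since the helper takes `&mut GlobalStateInner`, the old `Entry`-based caching no longer works: a live `Entry` keeps `base_addr` (a field of that same state) mutably borrowed, which cannot coexist with handing the whole state to the helper. The wrapper therefore switches to a plain `get` lookup followed by `try_insert` on a miss. Below is a minimal, self-contained sketch of that pattern; `State`, `compute_addr_uncached`, and `addr_for` are made-up names standing in for Miri's actual types, not code from this commit.

```rust
use std::collections::HashMap;

#[derive(Default)]
struct State {
    base_addr: HashMap<u64, u64>, // alloc id -> assigned base address
    next_base_addr: u64,
}

// The "uncached" part: picks an address and is free to mutate any field of `state`.
fn compute_addr_uncached(state: &mut State, _alloc_id: u64) -> u64 {
    let addr = state.next_base_addr;
    state.next_base_addr += 16;
    addr
}

// The caching wrapper: read the cache first, otherwise compute and insert.
// `get` hands back a copied value, so the map is no longer borrowed by the time
// the `None` arm passes the whole state mutably to the helper.
fn addr_for(state: &mut State, alloc_id: u64) -> u64 {
    match state.base_addr.get(&alloc_id) {
        Some(&addr) => addr,
        None => {
            let addr = compute_addr_uncached(state, alloc_id);
            state.base_addr.insert(alloc_id, addr);
            addr
        }
    }
}

fn main() {
    let mut state = State::default();
    assert_eq!(addr_for(&mut state, 1), 0);
    assert_eq!(addr_for(&mut state, 2), 16);
    assert_eq!(addr_for(&mut state, 1), 0); // cache hit, address is stable
}
```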

File tree

1 file changed: +106 -105 lines

  • src/tools/miri/src/alloc_addresses


src/tools/miri/src/alloc_addresses/mod.rs

Lines changed: 106 additions & 105 deletions
@@ -5,7 +5,6 @@ mod reuse_pool;
 
 use std::cell::RefCell;
 use std::cmp::max;
-use std::collections::hash_map::Entry;
 
 use rand::Rng;
 
@@ -151,6 +150,95 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
         }
     }
 
+    fn addr_from_alloc_id_uncached(
+        &self,
+        global_state: &mut GlobalStateInner,
+        alloc_id: AllocId,
+        memory_kind: MemoryKind,
+    ) -> InterpResult<'tcx, u64> {
+        let ecx = self.eval_context_ref();
+        let mut rng = ecx.machine.rng.borrow_mut();
+        let (size, align, kind) = ecx.get_alloc_info(alloc_id);
+        // This is either called immediately after allocation (and then cached), or when
+        // adjusting `tcx` pointers (which never get freed). So assert that we are looking
+        // at a live allocation. This also ensures that we never re-assign an address to an
+        // allocation that previously had an address, but then was freed and the address
+        // information was removed.
+        assert!(!matches!(kind, AllocKind::Dead));
+
+        // This allocation does not have a base address yet, pick or reuse one.
+        if ecx.machine.native_lib.is_some() {
+            // In native lib mode, we use the "real" address of the bytes for this allocation.
+            // This ensures the interpreted program and native code have the same view of memory.
+            let base_ptr = match kind {
+                AllocKind::LiveData => {
+                    if ecx.tcx.try_get_global_alloc(alloc_id).is_some() {
+                        // For new global allocations, we always pre-allocate the memory to be able use the machine address directly.
+                        let prepared_bytes = MiriAllocBytes::zeroed(size, align)
+                            .unwrap_or_else(|| {
+                                panic!("Miri ran out of memory: cannot create allocation of {size:?} bytes")
+                            });
+                        let ptr = prepared_bytes.as_ptr();
+                        // Store prepared allocation space to be picked up for use later.
+                        global_state
+                            .prepared_alloc_bytes
+                            .try_insert(alloc_id, prepared_bytes)
+                            .unwrap();
+                        ptr
+                    } else {
+                        ecx.get_alloc_bytes_unchecked_raw(alloc_id)?
+                    }
+                }
+                AllocKind::Function | AllocKind::VTable => {
+                    // Allocate some dummy memory to get a unique address for this function/vtable.
+                    let alloc_bytes =
+                        MiriAllocBytes::from_bytes(&[0u8; 1], Align::from_bytes(1).unwrap());
+                    let ptr = alloc_bytes.as_ptr();
+                    // Leak the underlying memory to ensure it remains unique.
+                    std::mem::forget(alloc_bytes);
+                    ptr
+                }
+                AllocKind::Dead => unreachable!(),
+            };
+            // Ensure this pointer's provenance is exposed, so that it can be used by FFI code.
+            return Ok(base_ptr.expose_provenance().try_into().unwrap());
+        }
+        // We are not in native lib mode, so we control the addresses ourselves.
+        if let Some((reuse_addr, clock)) =
+            global_state.reuse.take_addr(&mut *rng, size, align, memory_kind, ecx.active_thread())
+        {
+            if let Some(clock) = clock {
+                ecx.acquire_clock(&clock);
+            }
+            Ok(reuse_addr)
+        } else {
+            // We have to pick a fresh address.
+            // Leave some space to the previous allocation, to give it some chance to be less aligned.
+            // We ensure that `(global_state.next_base_addr + slack) % 16` is uniformly distributed.
+            let slack = rng.gen_range(0..16);
+            // From next_base_addr + slack, round up to adjust for alignment.
+            let base_addr = global_state
+                .next_base_addr
+                .checked_add(slack)
+                .ok_or_else(|| err_exhaust!(AddressSpaceFull))?;
+            let base_addr = align_addr(base_addr, align.bytes());
+
+            // Remember next base address. If this allocation is zero-sized, leave a gap of at
+            // least 1 to avoid two allocations having the same base address. (The logic in
+            // `alloc_id_from_addr` assumes unique addresses, and different function/vtable pointers
+            // need to be distinguishable!)
+            global_state.next_base_addr = base_addr
+                .checked_add(max(size.bytes(), 1))
+                .ok_or_else(|| err_exhaust!(AddressSpaceFull))?;
+            // Even if `Size` didn't overflow, we might still have filled up the address space.
+            if global_state.next_base_addr > ecx.target_usize_max() {
+                throw_exhaust!(AddressSpaceFull);
+            }
+
+            Ok(base_addr)
+        }
+    }
+
     fn addr_from_alloc_id(
         &self,
         alloc_id: AllocId,
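
The fresh-address branch of the new helper adds a random slack of 0 to 15 bytes, rounds the result up to the allocation's alignment, and then advances `next_base_addr` by at least one byte so that even zero-sized allocations end up at distinct addresses. A rough worked example of that arithmetic follows; `align_up` is an assumed stand-in for Miri's `align_addr` (taken here to be the usual round-up to a power-of-two alignment), and the slack is fixed instead of drawn from the RNG.

```rust
fn align_up(addr: u64, align: u64) -> u64 {
    // Standard round-up for power-of-two alignments.
    debug_assert!(align.is_power_of_two());
    (addr + align - 1) & !(align - 1)
}

fn main() {
    let next_base_addr: u64 = 0x1000;
    let slack: u64 = 5; // drawn from rng.gen_range(0..16) in the real code
    let align: u64 = 8;
    let size: u64 = 24;

    // Add the slack, then round up to the allocation's alignment.
    let base_addr = align_up(next_base_addr + slack, align);
    assert_eq!(base_addr, 0x1008); // 0x1005 rounded up to a multiple of 8

    // Advance by at least 1 byte so even zero-sized allocations get unique addresses.
    let new_next_base_addr = base_addr + size.max(1);
    assert_eq!(new_next_base_addr, 0x1020);
}
```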
@@ -160,104 +248,16 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
         let mut global_state = ecx.machine.alloc_addresses.borrow_mut();
         let global_state = &mut *global_state;
 
-        Ok(match global_state.base_addr.entry(alloc_id) {
-            Entry::Occupied(entry) => *entry.get(),
-            Entry::Vacant(entry) => {
-                let mut rng = ecx.machine.rng.borrow_mut();
-                let (size, align, kind) = ecx.get_alloc_info(alloc_id);
-                // This is either called immediately after allocation (and then cached), or when
-                // adjusting `tcx` pointers (which never get freed). So assert that we are looking
-                // at a live allocation. This also ensures that we never re-assign an address to an
-                // allocation that previously had an address, but then was freed and the address
-                // information was removed.
-                assert!(!matches!(kind, AllocKind::Dead));
-
-                // This allocation does not have a base address yet, pick or reuse one.
-                let base_addr = if ecx.machine.native_lib.is_some() {
-                    // In native lib mode, we use the "real" address of the bytes for this allocation.
-                    // This ensures the interpreted program and native code have the same view of memory.
-                    match kind {
-                        AllocKind::LiveData => {
-                            let ptr = if ecx.tcx.try_get_global_alloc(alloc_id).is_some() {
-                                // For new global allocations, we always pre-allocate the memory to be able use the machine address directly.
-                                let prepared_bytes = MiriAllocBytes::zeroed(size, align)
-                                    .unwrap_or_else(|| {
-                                        panic!("Miri ran out of memory: cannot create allocation of {size:?} bytes")
-                                    });
-                                let ptr = prepared_bytes.as_ptr();
-                                // Store prepared allocation space to be picked up for use later.
-                                global_state
-                                    .prepared_alloc_bytes
-                                    .try_insert(alloc_id, prepared_bytes)
-                                    .unwrap();
-                                ptr
-                            } else {
-                                ecx.get_alloc_bytes_unchecked_raw(alloc_id)?
-                            };
-                            // Ensure this pointer's provenance is exposed, so that it can be used by FFI code.
-                            ptr.expose_provenance().try_into().unwrap()
-                        }
-                        AllocKind::Function | AllocKind::VTable => {
-                            // Allocate some dummy memory to get a unique address for this function/vtable.
-                            let alloc_bytes = MiriAllocBytes::from_bytes(
-                                &[0u8; 1],
-                                Align::from_bytes(1).unwrap(),
-                            );
-                            // We don't need to expose these bytes as nobody is allowed to access them.
-                            let addr = alloc_bytes.as_ptr().addr().try_into().unwrap();
-                            // Leak the underlying memory to ensure it remains unique.
-                            std::mem::forget(alloc_bytes);
-                            addr
-                        }
-                        AllocKind::Dead => unreachable!(),
-                    }
-                } else if let Some((reuse_addr, clock)) = global_state.reuse.take_addr(
-                    &mut *rng,
-                    size,
-                    align,
-                    memory_kind,
-                    ecx.active_thread(),
-                ) {
-                    if let Some(clock) = clock {
-                        ecx.acquire_clock(&clock);
-                    }
-                    reuse_addr
-                } else {
-                    // We have to pick a fresh address.
-                    // Leave some space to the previous allocation, to give it some chance to be less aligned.
-                    // We ensure that `(global_state.next_base_addr + slack) % 16` is uniformly distributed.
-                    let slack = rng.gen_range(0..16);
-                    // From next_base_addr + slack, round up to adjust for alignment.
-                    let base_addr = global_state
-                        .next_base_addr
-                        .checked_add(slack)
-                        .ok_or_else(|| err_exhaust!(AddressSpaceFull))?;
-                    let base_addr = align_addr(base_addr, align.bytes());
-
-                    // Remember next base address. If this allocation is zero-sized, leave a gap
-                    // of at least 1 to avoid two allocations having the same base address.
-                    // (The logic in `alloc_id_from_addr` assumes unique addresses, and different
-                    // function/vtable pointers need to be distinguishable!)
-                    global_state.next_base_addr = base_addr
-                        .checked_add(max(size.bytes(), 1))
-                        .ok_or_else(|| err_exhaust!(AddressSpaceFull))?;
-                    // Even if `Size` didn't overflow, we might still have filled up the address space.
-                    if global_state.next_base_addr > ecx.target_usize_max() {
-                        throw_exhaust!(AddressSpaceFull);
-                    }
-
-                    base_addr
-                };
-                trace!(
-                    "Assigning base address {:#x} to allocation {:?} (size: {}, align: {})",
-                    base_addr,
-                    alloc_id,
-                    size.bytes(),
-                    align.bytes(),
-                );
+        match global_state.base_addr.get(&alloc_id) {
+            Some(&addr) => Ok(addr),
+            None => {
+                // First time we're looking for the absolute address of this allocation.
+                let base_addr =
+                    self.addr_from_alloc_id_uncached(global_state, alloc_id, memory_kind)?;
+                trace!("Assigning base address {:#x} to allocation {:?}", base_addr, alloc_id);
 
                 // Store address in cache.
-                entry.insert(base_addr);
+                global_state.base_addr.try_insert(alloc_id, base_addr).unwrap();
 
                 // Also maintain the opposite mapping in `int_to_ptr_map`, ensuring we keep it sorted.
                 // We have a fast-path for the common case that this address is bigger than all previous ones.
@@ -275,9 +275,9 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
                 };
                 global_state.int_to_ptr_map.insert(pos, (base_addr, alloc_id));
 
-                base_addr
+                Ok(base_addr)
             }
-        })
+        }
     }
 }
 
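The unchanged context around this hunk also maintains `int_to_ptr_map`, the reverse mapping from base addresses back to allocation ids, which is kept sorted by address with a fast path for the common case that the new address is larger than all previous ones. A hypothetical sketch of that bookkeeping, assuming a plain sorted `Vec<(u64, u64)>` in place of Miri's `Vec<(u64, AllocId)>`:

```rust
// Keep `int_to_ptr_map` sorted by base address; this mirrors the pattern behind
// `int_to_ptr_map.insert(pos, (base_addr, alloc_id))` in the hunk above, with
// simplified types (u64 ids instead of AllocId).
fn insert_sorted(int_to_ptr_map: &mut Vec<(u64, u64)>, base_addr: u64, alloc_id: u64) {
    // Fast path: the new address is bigger than all previous ones, so it goes at the end.
    let pos = if int_to_ptr_map.last().map_or(true, |(addr, _)| *addr < base_addr) {
        int_to_ptr_map.len()
    } else {
        // Slow path: find the insertion point by binary search on the address.
        int_to_ptr_map
            .binary_search_by_key(&base_addr, |(addr, _)| *addr)
            .unwrap_err() // addresses are unique, so the key is never already present
    };
    int_to_ptr_map.insert(pos, (base_addr, alloc_id));
}

fn main() {
    let mut map: Vec<(u64, u64)> = Vec::new();
    insert_sorted(&mut map, 0x100, 1);
    insert_sorted(&mut map, 0x200, 2); // fast path: appended at the end
    insert_sorted(&mut map, 0x180, 3); // slow path: inserted in the middle
    assert_eq!(map, vec![(0x100, 1), (0x180, 3), (0x200, 2)]);
}
```
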
@@ -373,14 +373,15 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
         align: Align,
     ) -> InterpResult<'tcx, MiriAllocBytes> {
         let ecx = self.eval_context_ref();
-        Ok(if ecx.machine.native_lib.is_some() {
+        if ecx.machine.native_lib.is_some() {
             // In native lib mode, MiriAllocBytes for global allocations are handled via `prepared_alloc_bytes`.
-            // This additional call ensures that some `MiriAllocBytes` are always prepared.
+            // This additional call ensures that some `MiriAllocBytes` are always prepared, just in case
+            // this function gets called before the first time `addr_from_alloc_id` gets called.
             ecx.addr_from_alloc_id(id, kind)?;
-            let mut global_state = ecx.machine.alloc_addresses.borrow_mut();
             // The memory we need here will have already been allocated during an earlier call to
             // `addr_from_alloc_id` for this allocation. So don't create a new `MiriAllocBytes` here, instead
             // fetch the previously prepared bytes from `prepared_alloc_bytes`.
+            let mut global_state = ecx.machine.alloc_addresses.borrow_mut();
             let mut prepared_alloc_bytes = global_state
                 .prepared_alloc_bytes
                 .remove(&id)
@@ -390,10 +391,10 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
             assert_eq!(prepared_alloc_bytes.len(), bytes.len());
             // Copy allocation contents into prepared memory.
             prepared_alloc_bytes.copy_from_slice(bytes);
-            prepared_alloc_bytes
+            Ok(prepared_alloc_bytes)
         } else {
-            MiriAllocBytes::from_bytes(std::borrow::Cow::Borrowed(&*bytes), align)
-        })
+            Ok(MiriAllocBytes::from_bytes(std::borrow::Cow::Borrowed(bytes), align))
+        }
     }
 
     /// When a pointer is used for a memory access, this computes where in which allocation the
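
The last two hunks adjust the native-lib-mode path that materializes a global allocation's bytes: instead of allocating fresh memory, the `MiriAllocBytes` that `addr_from_alloc_id_uncached` stashed in `prepared_alloc_bytes` are taken back out and filled with the allocation's contents, so the data ends up at exactly the address that was already handed out. A simplified sketch of this two-phase flow, using a hypothetical `State` with plain `Vec<u8>` buffers rather than `MiriAllocBytes`:

```rust
use std::collections::HashMap;

#[derive(Default)]
struct State {
    // alloc id -> memory pre-allocated when the base address was picked.
    prepared_alloc_bytes: HashMap<u64, Vec<u8>>,
}

// Phase 1: when an address is assigned, pre-allocate zeroed memory so the
// machine address of these bytes can serve as the allocation's base address.
fn assign_address(state: &mut State, alloc_id: u64, size: usize) -> usize {
    let prepared = vec![0u8; size];
    let addr = prepared.as_ptr() as usize;
    state.prepared_alloc_bytes.insert(alloc_id, prepared);
    addr
}

// Phase 2: when the allocation's contents are materialized, fetch the prepared
// memory instead of allocating again, and copy the bytes into it.
fn materialize(state: &mut State, alloc_id: u64, contents: &[u8]) -> Vec<u8> {
    let mut prepared = state
        .prepared_alloc_bytes
        .remove(&alloc_id)
        .expect("address must have been assigned before the bytes are materialized");
    assert_eq!(prepared.len(), contents.len());
    prepared.copy_from_slice(contents);
    prepared
}

fn main() {
    let mut state = State::default();
    let addr = assign_address(&mut state, 7, 4);
    let bytes = materialize(&mut state, 7, &[1, 2, 3, 4]);
    // The contents now live at exactly the address handed out earlier.
    assert_eq!(bytes.as_ptr() as usize, addr);
    assert_eq!(bytes, [1u8, 2, 3, 4]);
}
```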
