fix #1108: false precondition when increasing align of global var
nunoplopes committed Dec 1, 2024
1 parent 902f258 commit 3a0b46c
Showing 14 changed files with 113 additions and 40 deletions.
4 changes: 2 additions & 2 deletions ir/attrs.cpp
@@ -381,8 +381,8 @@ encodePtrAttrs(State &s, const expr &ptrvalue, uint64_t derefBytes,
   } else if (align != 1) {
     non_poison &= p.isAligned(align);
     if (isdecl)
-      s.addUB(merge(p.isDereferenceable(1, 1, false, true))
-                .implies(merge(p.isDereferenceable(1, align, false, true))));
+      s.addAxiom(merge(p.isDereferenceable(1, 1, false, true))
+                   .implies(merge(p.isDereferenceable(1, align, false, true))));
   }
 
   if (allocalign) {
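In plain terms: for a declaration, the fact "dereferenceable at align 1 implies dereferenceable at the raised align" is now assumed as an axiom about the environment rather than asserted as UB, where it acted as a precondition that could never be discharged once a transformation raised the alignment. A loose propositional sketch of the difference, with hypothetical bools standing in for the SMT facts:

#include <cassert>

int main() {
  // hypothetical stand-ins for the SMT facts about an external global
  bool deref_align1 = true;   // dereferenceable at align 1
  bool deref_align8 = false;  // at the raised align -- underivable for a decl
  bool implication  = !deref_align1 || deref_align8;

  // before: addUB(implication) folded the fact into the precondition, and
  // since nothing lets the solver derive it, the precondition came out false
  assert(!implication);

  // after: addAxiom(implication) assumes it (intuitively: the loader honours
  // the declared alignment), so the raised align follows from align 1
  bool assumed = true;  // the axiom, taken for granted rather than proven
  assert(deref_align1 && assumed);
}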
4 changes: 2 additions & 2 deletions ir/instr.cpp
@@ -3835,7 +3835,7 @@ StateValue GEP::toSMT(State &s) const {
 
   // FIXME: not implemented for physical pointers
   if (inbounds)
-    inbounds_np.add(ptr.inbounds());
+    inbounds_np.add(ptr.inbounds(false, config::disallow_ub_exploitation));
 
   expr offset_sum = expr::mkUInt(0, bits_for_offset);
   for (auto &[sz, idx] : offsets) {
@@ -3875,7 +3875,7 @@ StateValue GEP::toSMT(State &s) const {
     non_poison.add(np);
 
     if (inbounds)
-      inbounds_np.add(ptr.inbounds());
+      inbounds_np.add(ptr.inbounds(false, config::disallow_ub_exploitation));
   }
 
   if (inbounds) {
56 changes: 37 additions & 19 deletions ir/memory.cpp
@@ -507,6 +507,10 @@ ostream& operator<<(ostream &os, const Byte &byte) {
   return os;
 }
 
+unsigned Memory::bitsAlignmentInfo() {
+  return ilog2_ceil(bits_size_t, false);
+}
+
 bool Memory::observesAddresses() {
   return true; //observes_addresses;
 }
@@ -1131,7 +1135,8 @@ vector<Byte> Memory::load(const Pointer &ptr, unsigned bytes, set<expr> &undef,
     }
   } else {
     uint64_t blk_size = UINT64_MAX;
-    bool single_load = ptr.blockSize().isUInt(blk_size) && blk_size == bytes;
+    bool single_load
+      = ptr.blockSizeAligned().isUInt(blk_size) && blk_size == bytes;
     auto offset = ptr.getShortOffset();
     expr blk_offset = single_load ? expr::mkUInt(0, offset) : offset;

@@ -1200,7 +1205,8 @@ void Memory::store(const Pointer &ptr,
   auto mem = blk.val;
 
   uint64_t blk_size;
-  bool full_write = ptr.blockSize().isUInt(blk_size) && blk_size == bytes;
+  bool full_write
+    = ptr.blockSizeAligned().isUInt(blk_size) && blk_size == bytes;
 
   // optimization: if fully rewriting the block, don't bother with the old
   // contents. Pick a value as the default one.
@@ -1261,7 +1267,7 @@ void Memory::storeLambda(const Pointer &ptr, const expr &offset,
   auto orig_val = ::raw_load(blk.val, offset);
 
   // optimization: full rewrite
-  if (bytes.eq(ptr.blockSize())) {
+  if (bytes.eq(ptr.blockSizeAligned())) {
     blk.val = val_no_offset
       ? mk_block_if(cond, val, std::move(blk.val))
       : expr::mkLambda(offset, mk_block_if(cond, val, std::move(orig_val)));
@@ -1485,11 +1491,11 @@ void Memory::mkAxioms(const Memory &tgt) const {
   };
 
   // transformation can increase alignment
-  expr align = expr::mkUInt(ilog2(heap_block_alignment), 6);
+  expr align = expr::mkUInt(ilog2(heap_block_alignment), bitsAlignmentInfo());
 
   if (null_is_dereferenceable && has_null_block) {
-    state->addAxiom(Pointer::mkNullPointer(*this).blockAlignment()==UINT64_MAX);
-    state->addAxiom(Pointer::mkNullPointer(tgt).blockAlignment() == UINT64_MAX);
+    state->addAxiom(Pointer::mkNullPointer(*this).blockAlignment() == 0);
+    state->addAxiom(Pointer::mkNullPointer(tgt).blockAlignment() == 0);
   }
 
   for (unsigned bid = 0; bid < num_nonlocals_src; ++bid) {
@@ -1526,11 +1532,16 @@ void Memory::mkAxioms(const Memory &tgt) const {
     if (skip_bid(bid))
       continue;
 
-    Pointer p1(bid < num_nonlocals_src ? *this : tgt, bid, false);
+    // because tgt's align >= src's align, it's sufficient to have these
+    // constraints for tgt.
+    Pointer p1(tgt, bid, false);
     auto addr = p1.getAddress();
     auto sz = p1.blockSize().zextOrTrunc(bits_ptr_address);
     auto align = p1.blockAlignment();
 
+    // limit max alignment so that aligning block size doesn't overflow
+    state->addAxiom(align.ule_extend(bits_ptr_address-1));
+
     state->addAxiom(addr != zero);
 
     // address must be properly aligned
@@ -1548,23 +1559,28 @@
               expr::mkUInt(0, 1).concat(last.extract(bits_ptr_address-2, 0)))
           : addr != last);
     } else {
+      sz = p1.blockSizeAligned().zextOrTrunc(bits_ptr_address);
       state->addAxiom(
         Pointer::hasLocalBit()
           // don't spill to local addr section
           ? (addr + sz).sign() == 0
          : addr.add_no_uoverflow(sz));
     }
 
+    state->addAxiom(p1.blockSize()
+                      .round_up_bits_no_overflow(p1.blockAlignment()));
+
    if (num_nonlocals > max_quadratic_disjoint)
       continue;
 
     // disjointness constraint
     for (unsigned bid2 = bid + 1; bid2 < num_nonlocals; ++bid2) {
       if (skip_bid(bid2))
         continue;
-      Pointer p2(bid2 < num_nonlocals_src ? *this : tgt, bid2, false);
+      Pointer p2(tgt, bid2, false);
       state->addAxiom(disjoint(addr, sz, align, p2.getAddress(),
-                               p2.blockSize().zextOrTrunc(bits_ptr_address),
+                               p2.blockSizeAligned()
+                                 .zextOrTrunc(bits_ptr_address),
                                p2.blockAlignment()));
     }
   }
@@ -1575,14 +1591,16 @@ void Memory::mkAxioms(const Memory &tgt) const {
     expr bid1 = expr::mkFreshVar("#bid1", bid_ty);
     expr bid2 = expr::mkFreshVar("#bid2", bid_ty);
     expr offset = expr::mkUInt(0, bits_for_offset);
-    Pointer p1(*this, Pointer::mkLongBid(bid1, false), offset);
-    Pointer p2(*this, Pointer::mkLongBid(bid2, false), offset);
+    Pointer p1(tgt, Pointer::mkLongBid(bid1, false), offset);
+    Pointer p2(tgt, Pointer::mkLongBid(bid2, false), offset);
     state->addAxiom(
       expr::mkForAll({bid1, bid2},
         bid1 == bid2 ||
-        disjoint(p1.getAddress(), p1.blockSize().zextOrTrunc(bits_ptr_address),
+        disjoint(p1.getAddress(),
+                 p1.blockSizeAligned().zextOrTrunc(bits_ptr_address),
                  p1.blockAlignment(),
-                 p2.getAddress(), p2.blockSize().zextOrTrunc(bits_ptr_address),
+                 p2.getAddress(),
+                 p2.blockSizeAligned().zextOrTrunc(bits_ptr_address),
                  p2.blockAlignment())));
   }
 }
@@ -1958,7 +1976,8 @@ static expr disjoint_local_blocks(const Memory &m, const expr &addr,
     Pointer p2(m, Pointer::mkLongBid(sbid, true), zero);
     disj &= p2.isBlockAlive()
               .implies(disjoint(addr, sz, align, p2.getAddress(),
-                                p2.blockSize().zextOrTrunc(bits_ptr_address),
+                                p2.blockSizeAligned()
+                                  .zextOrTrunc(bits_ptr_address),
                                 p2.blockAlignment()));
   }
   return disj;
@@ -2032,8 +2051,7 @@ Memory::alloc(const expr *size, uint64_t align, BlockKind blockKind,
   expr size_zext;
   expr nooverflow = true;
   if (size) {
-    size_zext = size->zextOrTrunc(bits_size_t)
-                    .round_up(expr::mkUInt(align, bits_size_t));
+    size_zext = size->zextOrTrunc(bits_size_t);
     nooverflow = size->bits() <= bits_size_t ? true :
                    size->extract(size->bits()-1, bits_size_t) == 0;
   }
@@ -2057,7 +2075,7 @@
 
   assert(align != 0);
   auto align_bits = ilog2(align);
-  expr align_expr = expr::mkUInt(align_bits, 6);
+  expr align_expr = expr::mkUInt(align_bits, bitsAlignmentInfo());
   bool is_null = !is_local && has_null_block && bid == 0;
 
   if (is_local) {
@@ -2111,7 +2129,7 @@ void Memory::startLifetime(const expr &ptr_local) {
   if (observesAddresses())
     state->addPre(p.isBlockAlive() ||
       disjoint_local_blocks(*this, p.getAddress(),
-                            p.blockSize().zextOrTrunc(bits_ptr_address),
+                            p.blockSizeAligned().zextOrTrunc(bits_ptr_address),
                             p.blockAlignment(), local_blk_addr));
 
   store_bv(p, true, local_block_liveness, non_local_block_liveness, true);
@@ -2418,7 +2436,7 @@ void Memory::copy(const Pointer &src, const Pointer &dst) {
 
 void Memory::fillPoison(const expr &bid) {
   Pointer p(*this, bid, expr::mkUInt(0, bits_for_offset));
-  expr blksz = p.blockSize();
+  expr blksz = p.blockSizeAligned();
   memset(std::move(p).release(), IntType("i8", 8).getDummyValue(false),
          std::move(blksz), bits_byte / 8, {}, false);
 }
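One way to read the blockSize() → blockSizeAligned() changes above: since a transformation may raise a block's alignment (per the "transformation can increase alignment" comment in mkAxioms), each block must already reserve its size rounded up to that alignment, and disjointness must hold for the rounded footprints. A standalone numeric sketch with hypothetical addresses, using plain integers instead of SMT bit-vectors and omitting disjoint()'s alignment arguments:

#include <cassert>
#include <cstdint>

// round x up to a multiple of the power of two 'align'
uint64_t round_up(uint64_t x, uint64_t align) {
  return (x + (align - 1)) & ~(align - 1);
}

// core of the disjointness axiom: [a1, a1+sz1) and [a2, a2+sz2) don't overlap
bool disjoint(uint64_t a1, uint64_t sz1, uint64_t a2, uint64_t sz2) {
  return a1 + sz1 <= a2 || a2 + sz2 <= a1;  // no overflow, per the axioms
}

int main() {
  // block 1: addr 0x1000, size 10, align 8 -> aligned footprint [0x1000, 0x1010)
  // block 2: addr 0x100a, size 4
  assert(disjoint(0x1000, 10, 0x100a, 4));                // raw sizes: disjoint
  assert(!disjoint(0x1000, round_up(10, 8), 0x100a, 4));  // aligned: they clash
}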
1 change: 1 addition & 0 deletions ir/memory.h
@@ -369,6 +369,7 @@ class Memory {
   friend class Pointer;
 
 private:
+  static unsigned bitsAlignmentInfo();
   smt::expr mk_block_val_array(unsigned bid) const;
   Byte raw_load(bool local, unsigned bid, const smt::expr &offset) const;
   void print_array(std::ostream &os, const smt::expr &a,
33 changes: 21 additions & 12 deletions ir/pointer.cpp
@@ -329,6 +329,15 @@ expr Pointer::blockSizeOffsetT() const {
   return bits_for_offset > bits_size_t ? sz.zextOrTrunc(bits_for_offset) : sz;
 }
 
+expr Pointer::blockSizeAligned() const {
+  return blockSize().round_up_bits(blockAlignment().zextOrTrunc(bits_size_t));
+}
+
+expr Pointer::blockSizeAlignedOffsetT() const {
+  expr sz = blockSizeAligned();
+  return bits_for_offset > bits_size_t ? sz.zextOrTrunc(bits_for_offset) : sz;
+}
+
 expr Pointer::reprWithoutAttrs() const {
   return p.extract(totalBits() - 1, bits_for_ptrattrs);
 }
@@ -390,7 +399,7 @@ expr Pointer::isOfBlock(const Pointer &block, const expr &bytes,
   assert(block.getOffset().isZero());
   expr addr = is_phy ? getPhysicalAddress() : getAddress();
   expr block_addr = block.getLogAddress();
-  expr block_size = block.blockSize();
+  expr block_size = block.blockSizeAligned();
 
   if (bytes.eq(block_size))
     return addr == block_addr;
@@ -407,7 +416,7 @@ expr Pointer::isInboundsOf(const Pointer &block, const expr &bytes0,
   expr bytes = bytes0.zextOrTrunc(bits_ptr_address);
   expr addr = is_phy ? getPhysicalAddress() : getAddress();
   expr block_addr = block.getLogAddress();
-  expr block_size = block.blockSize().zextOrTrunc(bits_ptr_address);
+  expr block_size = block.blockSizeAligned().zextOrTrunc(bits_ptr_address);
 
   if (bytes.eq(block_size))
     return addr == block_addr;
@@ -419,19 +428,19 @@
          (addr + bytes).ule(block_addr + block_size);
 }
 
-expr Pointer::isInbounds(bool strict) const {
+expr Pointer::isInbounds(bool strict, bool align_size) const {
   auto offset = getOffsetSizet();
-  auto size = blockSizeOffsetT();
+  auto size = align_size ? blockSizeAlignedOffsetT() : blockSizeOffsetT();
   return (strict ? offset.ult(size) : offset.ule(size)) && !offset.isNegative();
 }
 
-expr Pointer::inbounds(bool simplify_ptr) {
+expr Pointer::inbounds(bool simplify_ptr, bool align_size) {
   if (!simplify_ptr)
-    return isInbounds(false);
+    return isInbounds(false, align_size);
 
   DisjointExpr<expr> ret(expr(false)), all_ptrs;
   for (auto &[ptr_expr, domain] : DisjointExpr<expr>(p, 3)) {
-    expr inb = Pointer(m, ptr_expr).isInbounds(false);
+    expr inb = Pointer(m, ptr_expr).isInbounds(false, align_size);
     if (!inb.isFalse())
       all_ptrs.add(ptr_expr, domain);
     ret.add(std::move(inb), domain);
@@ -446,7 +455,7 @@ expr Pointer::inbounds(bool simplify_ptr) {
 
 expr Pointer::blockAlignment() const {
   return getValue("blk_align", m.local_blk_align, m.non_local_blk_align,
-                  expr::mkUInt(0, 6), true);
+                  expr::mkUInt(0, Memory::bitsAlignmentInfo()), true);
 }
 
 expr Pointer::isBlockAligned(uint64_t align, bool exact) const {
@@ -523,13 +532,13 @@ Pointer::isDereferenceable(const expr &bytes0, uint64_t align,
   };
 
   auto log_ptr = [&](Pointer &p) {
-    expr block_sz = p.blockSizeOffsetT();
+    expr block_sz = p.blockSizeAlignedOffsetT();
     expr offset = p.getOffset();
 
     expr cond;
     if (isUndef(offset) ||
         isUndef(p.getBid()) ||
-        bytes.ugt(p.blockSize()).isTrue() ||
+        bytes.ugt(p.blockSizeAligned()).isTrue() ||
         p.getOffsetSizet().uge(block_sz).isTrue()) {
       cond = false;
     } else {
@@ -569,7 +578,7 @@
 
     cond = p.isOfBlock(this_ptr, bytes, is_phy);
 
-    all_same_size &= bytes.eq(this_ptr.blockSize());
+    all_same_size &= bytes.eq(this_ptr.blockSizeAligned());
 
     bids.add(
       observes_local ? this_ptr.getBid() : this_ptr.getShortBid(), cond);
@@ -868,7 +877,7 @@ expr Pointer::isNull() const {
 
 bool Pointer::isBlkSingleByte() const {
   uint64_t blk_size;
-  return blockSize().isUInt(blk_size) && blk_size == bits_byte/8;
+  return blockSizeAligned().isUInt(blk_size) && blk_size == bits_byte/8;
 }
 
 pair<Pointer, expr> Pointer::findLogicalLocalPointer(const expr &addr) const {
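A concrete feel for the two new helpers, as a sketch over plain integers with hypothetical values: blockSizeAligned() rounds the block size up to a multiple of 2^blockAlignment(), and isInbounds(..., align_size=true) compares the offset against that rounded size instead of the raw one.

#include <cassert>
#include <cstdint>

// blockSizeAligned(): size rounded up to a multiple of 2^align_bits
uint64_t block_size_aligned(uint64_t size, unsigned align_bits) {
  uint64_t a = uint64_t(1) << align_bits;
  return (size + (a - 1)) & ~(a - 1);
}

// isInbounds(strict=false, align_size): offset <= chosen block size
bool is_inbounds(uint64_t offset, uint64_t size, unsigned align_bits,
                 bool align_size) {
  uint64_t sz = align_size ? block_size_aligned(size, align_bits) : size;
  return offset <= sz;  // offsets are unsigned here, so never negative
}

int main() {
  assert(block_size_aligned(10, 3) == 16);  // size 10, align 8 -> 16
  assert(block_size_aligned(16, 3) == 16);  // already a multiple: unchanged

  // offset 12 is past the raw size but still within the aligned footprint
  assert(!is_inbounds(12, 10, 3, false));
  assert( is_inbounds(12, 10, 3, true));
}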
6 changes: 4 additions & 2 deletions ir/pointer.h
@@ -88,6 +88,8 @@ class Pointer {
 
   smt::expr blockSize() const;
   smt::expr blockSizeOffsetT() const; // to compare with offsets
+  smt::expr blockSizeAligned() const;
+  smt::expr blockSizeAlignedOffsetT() const; // to compare with offsets
 
   const smt::expr& operator()() const { return p; }
   smt::expr release() && { return std::move(p); }
@@ -112,8 +114,8 @@
                         bool is_phy) const;
   smt::expr isInboundsOf(const Pointer &block, const smt::expr &bytes,
                          bool is_phy) const;
-  smt::expr isInbounds(bool strict) const;
-  smt::expr inbounds(bool simplify_ptr = false);
+  smt::expr isInbounds(bool strict, bool align_size = false) const;
+  smt::expr inbounds(bool simplify_ptr = false, bool align_size = false);
 
   smt::expr blockAlignment() const; // log(bits)
   smt::expr isBlockAligned(uint64_t align, bool exact = false) const;
10 changes: 10 additions & 0 deletions ir/state.cpp
@@ -826,6 +826,16 @@ void State::addReturn(StateValue &&val) {
   addUB(expr(false));
 }
 
+void State::addAxiom(AndExpr &&ands) {
+  assert(ands);
+  axioms.add(std::move(ands));
+}
+
+void State::addAxiom(expr &&axiom) {
+  assert(!axiom.isFalse());
+  axioms.add(std::move(axiom));
+}
+
 void State::addUB(pair<AndExpr, expr> &&ub) {
   addUB(std::move(ub.first));
   addGuardableUB(std::move(ub.second));
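The two addAxiom overloads move out of line so debug builds can assert that no axiom is trivially false: verification queries have the shape "axioms imply condition", so a false axiom would make any transformation verify vacuously. A minimal sketch of that failure mode, with hypothetical booleans in place of the solver's formulas:

#include <cassert>

// a query is only meaningful when the axioms are satisfiable
bool holds_under(bool axioms, bool verification_cond) {
  return !axioms || verification_cond;  // axioms => condition
}

int main() {
  bool vc = false;                 // a transformation that is actually wrong
  assert(holds_under(false, vc));  // false axiom: the bug slips through
  assert(!holds_under(true, vc));  // consistent axioms: the bug is caught
}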
4 changes: 2 additions & 2 deletions ir/state.h
@@ -278,8 +278,8 @@ class State {
   void addReturn(StateValue &&val);
 
   /*--- Axioms, preconditions, domains ---*/
-  void addAxiom(smt::AndExpr &&ands) { axioms.add(std::move(ands)); }
-  void addAxiom(smt::expr &&axiom) { axioms.add(std::move(axiom)); }
+  void addAxiom(smt::AndExpr &&ands);
+  void addAxiom(smt::expr &&axiom);
   void addPre(smt::expr &&cond) { precondition.add(std::move(cond)); }
 
   // we have 2 types of UB to support -disallow-ub-exploitation
17 changes: 17 additions & 0 deletions smt/expr.cpp
@@ -1133,6 +1133,16 @@ expr expr::round_up(const expr &power_of_two) const {
   return (*this + minus_1) & ~minus_1;
 }
 
+expr expr::round_up_bits(const expr &nbits) const {
+  return round_up(mkUInt(1, *this) << nbits.zextOrTrunc(bits()));
+}
+
+expr expr::round_up_bits_no_overflow(const expr &nbits) const {
+  expr power_2 = mkUInt(1, *this) << nbits.zextOrTrunc(bits());
+  expr minus_1 = power_2 - mkUInt(1, power_2);
+  return add_no_uoverflow(minus_1);
+}
+
 #define fold_fp_neg(fn) \
 do { \
   expr cond, neg, v, v2; \
@@ -1724,6 +1734,13 @@ expr expr::ule(uint64_t rhs) const {
   return ule(mkUInt(rhs, sort()));
 }
 
+expr expr::ule_extend(uint64_t rhs) const {
+  C();
+  if ((unsigned)bit_width(rhs) > bits())
+    return true;
+  return ule(rhs);
+}
+
 expr expr::ult(uint64_t rhs) const {
   C();
   return ult(mkUInt(rhs, sort()));
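The new helpers are plain bit-twiddling lifted to bit-vectors. A standalone sketch of their semantics on ordinary integers (hypothetical values; the real versions operate on fixed-width SMT exprs):

#include <cassert>
#include <cstdint>

// round_up(x, p): x rounded up to a multiple of the power of two p
uint64_t round_up(uint64_t x, uint64_t p) { return (x + (p - 1)) & ~(p - 1); }

// round_up_bits(x, n) == round_up(x, 1 << n)
uint64_t round_up_bits(uint64_t x, unsigned n) {
  return round_up(x, uint64_t(1) << n);
}

// round_up_bits_no_overflow modeled at a fixed 8-bit width: does x + (2^n - 1),
// the first step of the rounding, still fit in the bit-width?
bool round_up_bits_no_overflow_8(uint8_t x, unsigned n) {
  return unsigned(x) + ((1u << n) - 1) <= 0xffu;
}

int main() {
  assert(round_up_bits(13, 3) == 16);  // 13 rounded up to align 2^3
  assert(round_up_bits(16, 3) == 16);  // already aligned: unchanged
  assert(round_up_bits_no_overflow_8(0xf0, 3));   // 0xf0 + 7 = 0xf7, fits
  assert(!round_up_bits_no_overflow_8(0xfa, 3));  // 0xfa + 7 = 0x101, wraps
  // ule_extend(x, c) behaves like x <= c, but is trivially true when the
  // constant c needs more bits than x has, which would presumably otherwise
  // wrap the constant into the narrower width.
}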