Skip to content

Commit

Permalink
Be conservative about deriving Debug/Default with large alignment
Browse files Browse the repository at this point in the history
When there is large enough alignment that we might generate padding which has
more members than `RUST_DERIVE_IN_ARRAY_LIMIT`, we can break our ability to
derive traits. This commit solves this issue conservatively: there are cases
where we leave a derive on the table, because in order to know that we could add
that derive, we would need to compute padding before we determine whether we can
derive.

Fixes rust-lang#648
  • Loading branch information
fitzgen authored and tmfink committed Aug 4, 2017
1 parent 55149b9 commit 4d7eb65
Show file tree
Hide file tree
Showing 9 changed files with 113 additions and 9 deletions.
9 changes: 9 additions & 0 deletions src/ir/analysis/derive_debug.rs
Original file line number Diff line number Diff line change
Expand Up @@ -141,6 +141,15 @@ impl<'ctx, 'gen> MonotoneFramework for CannotDeriveDebug<'ctx, 'gen> {
};
}

if ty.layout(self.ctx).map_or(false, |l| l.align > RUST_DERIVE_IN_ARRAY_LIMIT) {
// We have to be conservative: the struct *could* have enough
// padding that we emit an array that is longer than
// `RUST_DERIVE_IN_ARRAY_LIMIT`. If we moved padding calculations
// into the IR and computed them before this analysis, then we could
// be precise rather than conservative here.
return self.insert(id);
}

match *ty.kind() {
// Handle the simple cases. These can derive debug without further
// information.
Expand Down
5 changes: 5 additions & 0 deletions src/ir/comp.rs
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ use super::dot::DotAttributes;
use super::item::{IsOpaque, Item};
use super::layout::Layout;
use super::traversal::{EdgeKind, Trace, Tracer};
use super::ty::RUST_DERIVE_IN_ARRAY_LIMIT;
use super::template::TemplateParameters;
use clang;
use codegen::struct_layout::{align_to, bytes_from_bits_pow2};
Expand Down Expand Up @@ -1435,6 +1436,10 @@ impl<'a> CanDeriveDefault<'a> for CompInfo {
return true;
}

if layout.map_or(false, |l| l.align > RUST_DERIVE_IN_ARRAY_LIMIT) {
return false;
}

if self.kind == CompKind::Union {
if ctx.options().unstable_rust {
return false;
Expand Down
65 changes: 65 additions & 0 deletions tests/expectations/tests/issue-648-derive-debug-with-padding.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,65 @@
/* automatically generated by rust-bindgen */


#![allow(dead_code, non_snake_case, non_camel_case_types, non_upper_case_globals)]


/// We emit a `[u8; 63usize]` padding field for this struct, which cannot derive
/// Debug because 63 is over the hard coded limit. (Yes, this struct doesn't end
/// up with the right alignment; we're waiting on `#[repr(align="N")]` to land
/// in rustc).
#[repr(C)]
#[derive(Copy)]
pub struct NoDebug {
/// The single `char` member from the C struct.
pub c: ::std::os::raw::c_char,
/// Tail padding so the struct's size matches the C `__aligned__(64)` layout.
pub __bindgen_padding_0: [u8; 63usize],
}
#[test]
fn bindgen_test_layout_NoDebug() {
    // The C struct is 64-byte aligned, so 1 byte of data plus 63 bytes of
    // padding gives a total size of 64 bytes.
    assert_eq!(
        ::std::mem::size_of::<NoDebug>(),
        64usize,
        concat!("Size of: ", stringify!(NoDebug))
    );
    // Field `c` must sit at offset 0 from the start of the struct.
    assert_eq!(
        unsafe { &(*(0 as *const NoDebug)).c as *const _ as usize },
        0usize,
        concat!(
            "Alignment of field: ",
            stringify!(NoDebug),
            "::",
            stringify!(c)
        )
    );
}
impl Clone for NoDebug {
    /// `NoDebug` derives `Copy`, so cloning is a plain bitwise copy.
    fn clone(&self) -> Self {
        *self
    }
}
impl Default for NoDebug {
    /// All-zero bytes are a valid value for this plain C struct, so zeroed
    /// memory serves as the default.
    fn default() -> Self {
        unsafe { ::std::mem::zeroed() }
    }
}
/// This should derive Debug because the padding size is less than the max derive
/// Debug impl for arrays. However, we conservatively don't derive Debug because
/// we determine Debug derive-ability before we compute padding, which happens at
/// codegen. (Again, we expect to get the alignment wrong for similar reasons.)
#[repr(C)]
#[derive(Copy)]
pub struct ShouldDeriveDebugButDoesNot {
/// The 32-byte `char` array member from the C struct.
pub c: [::std::os::raw::c_char; 32usize],
/// The trailing `char` member, at offset 32.
pub d: ::std::os::raw::c_char,
/// Tail padding so the struct's size matches the C `__aligned__(64)` layout.
pub __bindgen_padding_0: [u8; 31usize],
}
#[test]
fn bindgen_test_layout_ShouldDeriveDebugButDoesNot() {
    // 33 bytes of data plus 31 bytes of padding: the 64-byte-aligned C
    // struct occupies 64 bytes in total.
    assert_eq!(
        ::std::mem::size_of::<ShouldDeriveDebugButDoesNot>(),
        64usize,
        concat!("Size of: ", stringify!(ShouldDeriveDebugButDoesNot))
    );
    // `c` occupies bytes 0..32.
    assert_eq!(
        unsafe { &(*(0 as *const ShouldDeriveDebugButDoesNot)).c as *const _ as usize },
        0usize,
        concat!(
            "Alignment of field: ",
            stringify!(ShouldDeriveDebugButDoesNot),
            "::",
            stringify!(c)
        )
    );
    // `d` immediately follows the array, at offset 32.
    assert_eq!(
        unsafe { &(*(0 as *const ShouldDeriveDebugButDoesNot)).d as *const _ as usize },
        32usize,
        concat!(
            "Alignment of field: ",
            stringify!(ShouldDeriveDebugButDoesNot),
            "::",
            stringify!(d)
        )
    );
}
impl Clone for ShouldDeriveDebugButDoesNot {
    /// The type derives `Copy`, so a clone is simply a bitwise copy.
    fn clone(&self) -> Self {
        *self
    }
}
impl Default for ShouldDeriveDebugButDoesNot {
    /// Zeroed memory is a valid value for this plain C struct, so it is
    /// used as the default.
    fn default() -> Self {
        unsafe { ::std::mem::zeroed() }
    }
}
6 changes: 3 additions & 3 deletions tests/expectations/tests/layout_array.rs
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,7 @@ pub type rte_mempool_get_count =
-> ::std::os::raw::c_uint>;
/// Structure defining mempool operations structure
#[repr(C)]
#[derive(Debug, Copy)]
#[derive(Copy)]
pub struct rte_mempool_ops {
/// < Name of mempool ops struct.
pub name: [::std::os::raw::c_char; 32usize],
Expand Down Expand Up @@ -134,7 +134,7 @@ impl Clone for rte_spinlock_t {
/// any function pointers stored directly in the mempool struct would not be.
/// This results in us simply having "ops_index" in the mempool struct.
#[repr(C)]
#[derive(Debug, Copy)]
#[derive(Copy)]
pub struct rte_mempool_ops_table {
/// < Spinlock for add/delete.
pub sl: rte_spinlock_t,
Expand Down Expand Up @@ -173,7 +173,7 @@ impl Default for rte_mempool_ops_table {
}
/// Structure to hold malloc heap
#[repr(C)]
#[derive(Debug, Copy)]
#[derive(Copy)]
pub struct malloc_heap {
pub lock: rte_spinlock_t,
pub free_head: [malloc_heap__bindgen_ty_1; 13usize],
Expand Down
2 changes: 1 addition & 1 deletion tests/expectations/tests/layout_array_too_long.rs
Original file line number Diff line number Diff line change
Expand Up @@ -96,7 +96,7 @@ impl Clone for ip_frag_key {
/// @internal Fragmented packet to reassemble.
/// First two entries in the frags[] array are for the last and first fragments.
#[repr(C)]
#[derive(Debug, Copy)]
#[derive(Copy)]
pub struct ip_frag_pkt {
/// < LRU list
pub lru: ip_frag_pkt__bindgen_ty_1,
Expand Down
2 changes: 1 addition & 1 deletion tests/expectations/tests/layout_kni_mbuf.rs
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
pub const RTE_CACHE_LINE_MIN_SIZE: ::std::os::raw::c_uint = 64;
pub const RTE_CACHE_LINE_SIZE: ::std::os::raw::c_uint = 64;
#[repr(C)]
#[derive(Debug, Copy)]
#[derive(Copy)]
pub struct rte_kni_mbuf {
pub buf_addr: *mut ::std::os::raw::c_void,
pub buf_physaddr: u64,
Expand Down
9 changes: 6 additions & 3 deletions tests/expectations/tests/layout_large_align_field.rs
Original file line number Diff line number Diff line change
Expand Up @@ -129,7 +129,7 @@ impl Clone for ip_frag_key {
/// @internal Fragmented packet to reassemble.
/// First two entries in the frags[] array are for the last and first fragments.
#[repr(C)]
#[derive(Debug, Copy)]
#[derive(Copy)]
pub struct ip_frag_pkt {
/// < LRU list
pub lru: ip_frag_pkt__bindgen_ty_1,
Expand Down Expand Up @@ -258,7 +258,7 @@ impl Default for ip_pkt_list {
}
/// fragmentation table statistics
#[repr(C)]
#[derive(Debug, Default, Copy)]
#[derive(Copy)]
pub struct ip_frag_tbl_stat {
/// < total # of find/insert attempts.
pub find_num: u64,
Expand Down Expand Up @@ -312,9 +312,12 @@ fn bindgen_test_layout_ip_frag_tbl_stat() {
impl Clone for ip_frag_tbl_stat {
fn clone(&self) -> Self { *self }
}
impl Default for ip_frag_tbl_stat {
fn default() -> Self { unsafe { ::std::mem::zeroed() } }
}
/// fragmentation table
#[repr(C)]
#[derive(Debug, Copy)]
#[derive(Copy)]
pub struct rte_ip_frag_tbl {
/// < ttl for table entries.
pub max_cycles: u64,
Expand Down
2 changes: 1 addition & 1 deletion tests/expectations/tests/layout_mbuf.rs
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,7 @@ impl Clone for rte_atomic16_t {
}
/// The generic rte_mbuf, containing a packet mbuf.
#[repr(C)]
#[derive(Debug, Copy)]
#[derive(Copy)]
pub struct rte_mbuf {
pub cacheline0: MARKER,
/// < Virtual address of segment buffer.
Expand Down
22 changes: 22 additions & 0 deletions tests/headers/issue-648-derive-debug-with-padding.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
/**
 * We emit a `[u8; 63usize]` padding field for this struct, which cannot derive
 * Debug because 63 is over the hard coded limit. (Yes, this struct doesn't end
 * up with the right alignment; we're waiting on `#[repr(align="N")]` to land
 * in rustc).
 */
struct NoDebug {
char c;
// The 64-byte alignment forces 63 bytes of tail padding after `c`.
} __attribute__((__aligned__(64)));

/**
 * This should derive Debug because the padding size is less than the max derive
 * Debug impl for arrays. However, we conservatively don't derive Debug because
 * we determine Debug derive-ability before we compute padding, which happens at
 * codegen. (Again, we expect to get the alignment wrong for similar reasons.)
 */
struct ShouldDeriveDebugButDoesNot {
char c[32];
char d;
// The 64-byte alignment forces 31 bytes of tail padding after `d`.
} __attribute__((__aligned__(64)));

0 comments on commit 4d7eb65

Please sign in to comment.