mod dyngen;
pub(crate) mod error;
mod helpers;
mod impl_debug;
mod impl_partialeq;
mod postprocessing;
mod serialize;
pub(crate) mod struct_layout;
#[cfg(test)]
#[allow(warnings)]
pub(crate) mod bitfield_unit;
#[cfg(all(test, target_endian = "little"))]
mod bitfield_unit_tests;
use self::dyngen::DynamicItems;
use self::helpers::attributes;
use self::struct_layout::StructLayoutTracker;
use super::BindgenOptions;
use crate::callbacks::{DeriveInfo, FieldInfo, TypeKind as DeriveTypeKind};
use crate::codegen::error::Error;
use crate::ir::analysis::{HasVtable, Sizedness};
use crate::ir::annotations::{
Annotations, FieldAccessorKind, FieldVisibilityKind,
};
use crate::ir::comp::{
Bitfield, BitfieldUnit, CompInfo, CompKind, Field, FieldData, FieldMethods,
Method, MethodKind,
};
use crate::ir::context::{BindgenContext, ItemId};
use crate::ir::derive::{
CanDerive, CanDeriveCopy, CanDeriveDebug, CanDeriveDefault, CanDeriveEq,
CanDeriveHash, CanDeriveOrd, CanDerivePartialEq, CanDerivePartialOrd,
};
use crate::ir::dot;
use crate::ir::enum_ty::{Enum, EnumVariant, EnumVariantValue};
use crate::ir::function::{
ClangAbi, Function, FunctionKind, FunctionSig, Linkage,
};
use crate::ir::int::IntKind;
use crate::ir::item::{IsOpaque, Item, ItemCanonicalName, ItemCanonicalPath};
use crate::ir::item_kind::ItemKind;
use crate::ir::layout::Layout;
use crate::ir::module::Module;
use crate::ir::objc::{ObjCInterface, ObjCMethod};
use crate::ir::template::{
AsTemplateParam, TemplateInstantiation, TemplateParameters,
};
use crate::ir::ty::{Type, TypeKind};
use crate::ir::var::Var;
use proc_macro2::{self, Ident, Span};
use quote::TokenStreamExt;
use crate::{Entry, HashMap, HashSet};
use std::borrow::Cow;
use std::cell::Cell;
use std::collections::VecDeque;
use std::ffi::CStr;
use std::fmt::{self, Write};
use std::ops;
use std::str::{self, FromStr};
/// Errors that can arise while generating the final bindings output.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum CodegenError {
    /// A function could not be serialized as C code; carries the message and
    /// the source location it refers to.
    Serialize { msg: String, loc: String },
    /// An I/O failure, stored as its already-rendered message so the error
    /// stays `Clone`/`Eq`/`Hash`.
    Io(String),
}
/// Allow `?` on I/O operations by capturing the error's display text.
impl From<std::io::Error> for CodegenError {
    fn from(err: std::io::Error) -> Self {
        let rendered = err.to_string();
        Self::Io(rendered)
    }
}
impl fmt::Display for CodegenError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // I/O errors are already rendered; serialization errors get a
        // location-prefixed message.
        match self {
            Self::Io(err) => err.fmt(f),
            Self::Serialize { msg, loc } => {
                write!(f, "serialization error at {}: {}", loc, msg)
            }
        }
    }
}
/// Name of the type defined inside a constified enum module.
pub(crate) static CONSTIFIED_ENUM_MODULE_REPR_NAME: &str = "Type";
/// Builds the path prefix (`self::super::…`) that climbs from `item`'s module
/// back up to the top level. Without C++ namespaces enabled there is nothing
/// to climb, so the path is just `self`.
fn top_level_path(
    ctx: &BindgenContext,
    item: &Item,
) -> Vec<proc_macro2::TokenStream> {
    let depth = if ctx.options().enable_cxx_namespaces {
        item.codegen_depth(ctx)
    } else {
        0
    };
    std::iter::once(quote! { self })
        .chain((0..depth).map(|_| quote! { super }))
        .collect()
}
/// Emits a `use self::super::…::<root>;` statement so items inside a
/// namespace module can refer to the root module's helper types.
///
/// Only meaningful when C++ namespaces are enabled, and only for modules.
fn root_import(
    ctx: &BindgenContext,
    module: &Item,
) -> proc_macro2::TokenStream {
    assert!(ctx.options().enable_cxx_namespaces, "Somebody messed it up");
    assert!(module.is_module());

    let mut segments = top_level_path(ctx, module);
    let root_name = ctx.root_module().canonical_name(ctx);
    let root = ctx.rust_ident(root_name);
    segments.push(quote! { #root });

    let mut use_path = proc_macro2::TokenStream::new();
    use_path.append_separated(segments, quote!(::));

    quote! {
        #[allow(unused_imports)]
        use #use_path ;
    }
}
bitflags! {
    /// A bit set recording which standard traits a generated type is able to
    /// `#[derive(...)]`. Computed by `derives_of_item` and rendered into a
    /// list of trait names via the `From` impl below.
    #[derive(Copy, Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
    struct DerivableTraits: u16 {
        const DEBUG = 1 << 0;
        const DEFAULT = 1 << 1;
        const COPY = 1 << 2;
        const CLONE = 1 << 3;
        const HASH = 1 << 4;
        const PARTIAL_ORD = 1 << 5;
        const ORD = 1 << 6;
        const PARTIAL_EQ = 1 << 7;
        const EQ = 1 << 8;
    }
}
/// Computes the set of traits `item` can `#[derive(...)]`, honoring both the
/// derive analyses and any user annotations that forbid specific traits.
fn derives_of_item(
    item: &Item,
    ctx: &BindgenContext,
    packed: bool,
) -> DerivableTraits {
    let mut traits = DerivableTraits::empty();
    let all_template_params = item.all_template_params(ctx);

    if item.can_derive_copy(ctx) && !item.annotations().disallow_copy() {
        traits |= DerivableTraits::COPY;

        if ctx.options().rust_features().builtin_clone_impls ||
            !all_template_params.is_empty()
        {
            // FIXME: This requires extra logic if you have a big array in a
            // templated struct. The reason for this is that the magic:
            // fn clone(&self) -> Self { *self }
            // doesn't work for templates.
            //
            // It's not hard to fix though.
            traits |= DerivableTraits::CLONE;
        }
    } else if packed {
        // If the struct or union is packed, deriving from Copy is required for
        // deriving from any other trait.
        return traits;
    }

    if item.can_derive_debug(ctx) && !item.annotations().disallow_debug() {
        traits |= DerivableTraits::DEBUG;
    }
    if item.can_derive_default(ctx) && !item.annotations().disallow_default() {
        traits |= DerivableTraits::DEFAULT;
    }

    // The remaining traits have no annotation overrides; fold them in from a
    // table of (analysis result, flag) pairs.
    let unconditional = [
        (item.can_derive_hash(ctx), DerivableTraits::HASH),
        (item.can_derive_partialord(ctx), DerivableTraits::PARTIAL_ORD),
        (item.can_derive_ord(ctx), DerivableTraits::ORD),
        (item.can_derive_partialeq(ctx), DerivableTraits::PARTIAL_EQ),
        (item.can_derive_eq(ctx), DerivableTraits::EQ),
    ];
    for (can_derive, flag) in unconditional {
        if can_derive {
            traits |= flag;
        }
    }

    traits
}
/// Renders a trait bit set into the list of trait names to put inside
/// `#[derive(...)]`, in a fixed, stable order.
impl From<DerivableTraits> for Vec<&'static str> {
    fn from(derivable_traits: DerivableTraits) -> Vec<&'static str> {
        let table = [
            (DerivableTraits::DEBUG, "Debug"),
            (DerivableTraits::DEFAULT, "Default"),
            (DerivableTraits::COPY, "Copy"),
            (DerivableTraits::CLONE, "Clone"),
            (DerivableTraits::HASH, "Hash"),
            (DerivableTraits::PARTIAL_ORD, "PartialOrd"),
            (DerivableTraits::ORD, "Ord"),
            (DerivableTraits::PARTIAL_EQ, "PartialEq"),
            (DerivableTraits::EQ, "Eq"),
        ];

        let mut names = Vec::new();
        for (flag, name) in table {
            if derivable_traits.contains(flag) {
                names.push(name);
            }
        }
        names
    }
}
/// Parameters for the "wrap as variadic" transformation of a function.
struct WrapAsVariadic {
    // Name to give the generated wrapper.
    new_name: String,
    // Index of the `va_list` argument in the original signature.
    idx_of_va_list_arg: usize,
}
/// Mutable state threaded through code generation: the token streams produced
/// so far plus bookkeeping about what has already been emitted.
struct CodegenResult<'a> {
    /// The token streams generated so far, in emission order.
    items: Vec<proc_macro2::TokenStream>,
    /// Symbols collected for generating a dynamic-loading (`libloading`)
    /// facade, when that mode is enabled.
    dynamic_items: DynamicItems,

    /// A monotonic counter used to add stable unique ID's to stuff that doesn't
    /// need to be referenced by anything.
    codegen_id: &'a Cell<usize>,

    /// Whether a bindgen union has been generated at least once.
    saw_bindgen_union: bool,

    /// Whether an incomplete array has been generated at least once.
    saw_incomplete_array: bool,

    /// Whether Objective C types have been seen at least once.
    saw_objc: bool,

    /// Whether Apple block types have been seen at least once.
    saw_block: bool,

    /// Whether a bitfield allocation unit has been seen at least once.
    saw_bitfield_unit: bool,

    /// IDs of items already generated, so each item is emitted only once.
    items_seen: HashSet<ItemId>,
    /// The set of generated function/var names, needed because in C/C++ is
    /// legal to do something like:
    ///
    /// ```c++
    /// extern "C" {
    ///   void foo();
    ///   extern int bar;
    /// }
    ///
    /// extern "C" {
    ///   void foo();
    ///   extern int bar;
    /// }
    /// ```
    ///
    /// Being these two different declarations.
    functions_seen: HashSet<String>,
    /// Same deduplication, for variable names.
    vars_seen: HashSet<String>,

    /// Used for making bindings to overloaded functions. Maps from a canonical
    /// function name to the number of overloads we have already codegen'd for
    /// that name. This lets us give each overload a unique suffix.
    overload_counters: HashMap<String, u32>,

    /// List of items to serialize. With optionally the argument for the wrap as
    /// variadic transformation to be applied.
    items_to_serialize: Vec<(ItemId, Option<WrapAsVariadic>)>,
}
impl<'a> CodegenResult<'a> {
    /// Creates an empty result that shares the given ID counter.
    fn new(codegen_id: &'a Cell<usize>) -> Self {
        CodegenResult {
            items: Vec::new(),
            dynamic_items: DynamicItems::new(),
            codegen_id,
            saw_bindgen_union: false,
            saw_incomplete_array: false,
            saw_objc: false,
            saw_block: false,
            saw_bitfield_unit: false,
            items_seen: Default::default(),
            functions_seen: Default::default(),
            vars_seen: Default::default(),
            overload_counters: Default::default(),
            items_to_serialize: Default::default(),
        }
    }

    /// Mutable access to the dynamic-loading item collector.
    fn dynamic_items(&mut self) -> &mut DynamicItems {
        &mut self.dynamic_items
    }

    /// Records that a bindgen union wrapper was generated.
    fn saw_bindgen_union(&mut self) {
        self.saw_bindgen_union = true;
    }

    /// Records that an incomplete array field was generated.
    fn saw_incomplete_array(&mut self) {
        self.saw_incomplete_array = true;
    }

    /// Records that an Objective C type was generated.
    fn saw_objc(&mut self) {
        self.saw_objc = true;
    }

    /// Records that an Apple block type was generated.
    fn saw_block(&mut self) {
        self.saw_block = true;
    }

    /// Records that a bitfield allocation unit was generated.
    fn saw_bitfield_unit(&mut self) {
        self.saw_bitfield_unit = true;
    }

    /// Has this item already been emitted?
    fn seen<Id: Into<ItemId>>(&self, item: Id) -> bool {
        let id = item.into();
        self.items_seen.contains(&id)
    }

    /// Marks this item as emitted.
    fn set_seen<Id: Into<ItemId>>(&mut self, item: Id) {
        let id = item.into();
        self.items_seen.insert(id);
    }

    /// Has a function with this name already been emitted?
    fn seen_function(&self, name: &str) -> bool {
        self.functions_seen.contains(name)
    }

    /// Marks a function name as emitted.
    fn saw_function(&mut self, name: &str) {
        self.functions_seen.insert(name.into());
    }

    /// Get the overload number for the given function name. Increments the
    /// counter internally so the next time we ask for the overload for this
    /// name, we get the incremented value, and so on.
    fn overload_number(&mut self, name: &str) -> u32 {
        let counter = self.overload_counters.entry(name.into()).or_insert(0);
        let current = *counter;
        *counter += 1;
        current
    }

    /// Has a variable with this name already been emitted?
    fn seen_var(&self, name: &str) -> bool {
        self.vars_seen.contains(name)
    }

    /// Marks a variable name as emitted.
    fn saw_var(&mut self, name: &str) {
        self.vars_seen.insert(name.into());
    }

    /// Runs `cb` against a fresh nested result, merges the nested "saw" flags
    /// back into `self`, and returns the items the callback produced.
    fn inner<F>(&mut self, cb: F) -> Vec<proc_macro2::TokenStream>
    where
        F: FnOnce(&mut Self),
    {
        let mut child = Self::new(self.codegen_id);

        cb(&mut child);

        // Propagate anything the nested scope observed; its items are
        // returned to the caller instead of being appended here.
        self.saw_incomplete_array |= child.saw_incomplete_array;
        self.saw_objc |= child.saw_objc;
        self.saw_block |= child.saw_block;
        self.saw_bitfield_unit |= child.saw_bitfield_unit;
        self.saw_bindgen_union |= child.saw_bindgen_union;

        child.items
    }
}
/// Let a `CodegenResult` be used directly as its vector of generated items.
impl<'a> ops::Deref for CodegenResult<'a> {
    type Target = Vec<proc_macro2::TokenStream>;

    fn deref(&self) -> &Self::Target {
        &self.items
    }
}
/// Mutable counterpart: `result.push(tokens)` appends a generated item.
impl<'a> ops::DerefMut for CodegenResult<'a> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.items
    }
}
/// A trait to convert a rust type into a pointer, optionally const, to the same
/// type.
trait ToPtr {
    /// Wraps `self` in `*const` when `is_const` is true, `*mut` otherwise.
    fn to_ptr(self, is_const: bool) -> syn::Type;
}
impl ToPtr for syn::Type {
    fn to_ptr(self, is_const: bool) -> syn::Type {
        // Pick the pointer mutability based on constness.
        match is_const {
            true => syn::parse_quote! { *const #self },
            false => syn::parse_quote! { *mut #self },
        }
    }
}
/// An extension trait for `syn::Type` that lets us append any implicit
/// template parameters that exist for some type, if necessary.
trait WithImplicitTemplateParams {
    /// Returns `self` with `<P1, P2, …>` appended when `item`'s type has
    /// used template parameters; otherwise returns `self` unchanged.
    fn with_implicit_template_params(
        self,
        ctx: &BindgenContext,
        item: &Item,
    ) -> Self;
}
impl WithImplicitTemplateParams for syn::Type {
    fn with_implicit_template_params(
        self,
        ctx: &BindgenContext,
        item: &Item,
    ) -> Self {
        // Work on the fully-resolved item so the match below sees the real
        // type kind, not a reference to it.
        let item = item.id().into_resolver().through_type_refs().resolve(ctx);

        let params = match *item.expect_type().kind() {
            TypeKind::UnresolvedTypeRef(..) => {
                unreachable!("already resolved unresolved type refs")
            }
            TypeKind::ResolvedTypeRef(..) => {
                unreachable!("we resolved item through type refs")
            }
            // None of these types ever have implicit template parameters.
            TypeKind::Void |
            TypeKind::NullPtr |
            TypeKind::Pointer(..) |
            TypeKind::Reference(..) |
            TypeKind::Int(..) |
            TypeKind::Float(..) |
            TypeKind::Complex(..) |
            TypeKind::Array(..) |
            TypeKind::TypeParam |
            TypeKind::Opaque |
            TypeKind::Function(..) |
            TypeKind::Enum(..) |
            TypeKind::ObjCId |
            TypeKind::ObjCSel |
            TypeKind::TemplateInstantiation(..) => None,
            _ => {
                // Remaining kinds (comps, aliases, …) may carry used template
                // parameters that must appear in the generated type.
                let params = item.used_template_params(ctx);
                if params.is_empty() {
                    None
                } else {
                    Some(params.into_iter().map(|p| {
                        p.try_to_rust_ty(ctx, &()).expect(
                            "template params cannot fail to be a rust type",
                        )
                    }))
                }
            }
        };

        if let Some(params) = params {
            syn::parse_quote! { #self<#(#params),*> }
        } else {
            self
        }
    }
}
/// The central trait of this module: anything that can be turned into tokens
/// implements it, pushing its output into the shared `CodegenResult`.
trait CodeGenerator {
    /// Extra information from the caller.
    type Extra;

    /// Extra information returned to the caller.
    type Return;

    /// Generates tokens for `self` into `result`.
    fn codegen(
        &self,
        ctx: &BindgenContext,
        result: &mut CodegenResult<'_>,
        extra: &Self::Extra,
    ) -> Self::Return;
}
impl Item {
    /// Common pre-flight checks before generating any item: returns `true`
    /// when the item should actually be generated, and records it as seen so
    /// it is generated at most once.
    fn process_before_codegen(
        &self,
        ctx: &BindgenContext,
        result: &mut CodegenResult,
    ) -> bool {
        if !self.is_enabled_for_codegen(ctx) {
            return false;
        }

        let skip = self.is_blocklisted(ctx) || result.seen(self.id());
        if skip {
            debug!(
                "<Item as CodeGenerator>::process_before_codegen: Ignoring hidden or seen: \
                 self = {:?}",
                self
            );
            return false;
        }

        if !ctx.codegen_items().contains(&self.id()) {
            // TODO(emilio, #453): Figure out what to do when this happens
            // legitimately, we could track the opaque stuff and disable the
            // assertion there I guess.
            warn!("Found non-allowlisted item in code generation: {:?}", self);
        }

        result.set_seen(self.id());
        true
    }
}
impl CodeGenerator for Item {
    type Extra = ();
    type Return = ();

    fn codegen(
        &self,
        ctx: &BindgenContext,
        result: &mut CodegenResult<'_>,
        _extra: &(),
    ) {
        debug!("<Item as CodeGenerator>::codegen: self = {:?}", self);
        if !self.process_before_codegen(ctx, result) {
            return;
        }

        // Dispatch to the generator for the concrete item kind, passing
        // ourselves along as the `Extra` context.
        match *self.kind() {
            ItemKind::Module(ref module) => module.codegen(ctx, result, self),
            ItemKind::Function(ref fun) => fun.codegen(ctx, result, self),
            ItemKind::Var(ref var) => var.codegen(ctx, result, self),
            ItemKind::Type(ref ty) => ty.codegen(ctx, result, self),
        }
    }
}
impl CodeGenerator for Module {
    type Extra = Item;
    type Return = ();

    fn codegen(
        &self,
        ctx: &BindgenContext,
        result: &mut CodegenResult<'_>,
        item: &Item,
    ) {
        debug!("<Module as CodeGenerator>::codegen: item = {:?}", item);

        // Generates this module's children into `result`; additionally, when
        // we are the root module, prepends whatever helper types/headers the
        // children turned out to need (the `saw_*` flags).
        let codegen_self = |result: &mut CodegenResult,
                            found_any: &mut bool| {
            for child in self.children() {
                if ctx.codegen_items().contains(child) {
                    *found_any = true;
                    ctx.resolve_item(*child).codegen(ctx, result, &());
                }
            }

            if item.id() == ctx.root_module() {
                if result.saw_block {
                    utils::prepend_block_header(ctx, &mut *result);
                }
                if result.saw_bindgen_union {
                    utils::prepend_union_types(ctx, &mut *result);
                }
                if result.saw_incomplete_array {
                    utils::prepend_incomplete_array_types(ctx, &mut *result);
                }
                if ctx.need_bindgen_float16_type() {
                    utils::prepend_float16_type(&mut *result);
                }
                if ctx.need_bindgen_complex_type() {
                    utils::prepend_complex_type(&mut *result);
                }
                if result.saw_objc {
                    utils::prepend_objc_header(ctx, &mut *result);
                }
                if result.saw_bitfield_unit {
                    utils::prepend_bitfield_unit_type(ctx, &mut *result);
                }
            }
        };

        // Unless C++ namespaces are enabled (and this isn't an inline
        // namespace being flattened), children are generated inline rather
        // than wrapped in a `pub mod`.
        if !ctx.options().enable_cxx_namespaces ||
            (self.is_inline() &&
                !ctx.options().conservative_inline_namespaces)
        {
            codegen_self(result, &mut false);
            return;
        }

        let mut found_any = false;
        let inner_items = result.inner(|result| {
            result.push(root_import(ctx, item));

            // Splice in any user-provided raw lines registered for this
            // module path.
            let path = item
                .namespace_aware_canonical_path(ctx)
                .join("::")
                .into_boxed_str();
            if let Some(raw_lines) = ctx.options().module_lines.get(&path) {
                for raw_line in raw_lines {
                    found_any = true;
                    result.push(
                        proc_macro2::TokenStream::from_str(raw_line).unwrap(),
                    );
                }
            }

            codegen_self(result, &mut found_any);
        });

        // Don't bother creating an empty module.
        if !found_any {
            return;
        }

        let name = item.canonical_name(ctx);
        let ident = ctx.rust_ident(name);
        result.push(if item.id() == ctx.root_module() {
            // The root module also silences case-convention lints for
            // everything generated inside it.
            quote! {
                #[allow(non_snake_case, non_camel_case_types, non_upper_case_globals)]
                pub mod #ident {
                    #( #inner_items )*
                }
            }
        } else {
            quote! {
                pub mod #ident {
                    #( #inner_items )*
                }
            }
        });
    }
}
impl CodeGenerator for Var {
    type Extra = Item;
    type Return = ();

    /// Generates either a `pub const` (when the variable's value is known at
    /// bind time) or an `extern "C" { pub static ... }` declaration.
    fn codegen(
        &self,
        ctx: &BindgenContext,
        result: &mut CodegenResult<'_>,
        item: &Item,
    ) {
        use crate::ir::var::VarType;
        debug!("<Var as CodeGenerator>::codegen: item = {:?}", item);
        debug_assert!(item.is_enabled_for_codegen(ctx));

        let canonical_name = item.canonical_name(ctx);

        // C allows redeclaring the same extern in multiple blocks; emit each
        // name only once.
        if result.seen_var(&canonical_name) {
            return;
        }
        result.saw_var(&canonical_name);

        let canonical_ident = ctx.rust_ident(&canonical_name);

        // We can't generate bindings to static variables of templates. The
        // number of actual variables for a single declaration are open ended
        // and we don't know what instantiations do or don't exist.
        if !item.all_template_params(ctx).is_empty() {
            return;
        }

        let mut attrs = vec![];
        if let Some(comment) = item.comment(ctx) {
            attrs.push(attributes::doc(comment));
        }

        let var_ty = self.ty();
        let ty = var_ty.to_rust_ty_or_opaque(ctx, &());

        if let Some(val) = self.val() {
            match *val {
                VarType::Bool(val) => {
                    result.push(quote! {
                        #(#attrs)*
                        pub const #canonical_ident : #ty = #val ;
                    });
                }
                VarType::Int(val) => {
                    // Resolve through aliases/refs so we know the integer's
                    // signedness and can emit a correctly-typed literal.
                    let int_kind = var_ty
                        .into_resolver()
                        .through_type_aliases()
                        .through_type_refs()
                        .resolve(ctx)
                        .expect_type()
                        .as_integer()
                        .unwrap();
                    let val = if int_kind.is_signed() {
                        helpers::ast_ty::int_expr(val)
                    } else {
                        helpers::ast_ty::uint_expr(val as _)
                    };
                    result.push(quote! {
                        #(#attrs)*
                        pub const #canonical_ident : #ty = #val ;
                    });
                }
                VarType::String(ref bytes) => {
                    let prefix = ctx.trait_prefix();
                    let options = ctx.options();
                    let rust_features = options.rust_features;

                    let mut cstr_bytes = bytes.clone();
                    cstr_bytes.push(0);
                    let len = proc_macro2::Literal::usize_unsuffixed(
                        cstr_bytes.len(),
                    );

                    // TODO: Here we ignore the type we just made up, probably
                    // we should refactor how the variable type and ty ID work.
                    let array_ty = quote! { [u8; #len] };
                    let cstr_ty = quote! { ::#prefix::ffi::CStr };

                    let bytes = proc_macro2::Literal::byte_string(&cstr_bytes);

                    if options.generate_cstr &&
                        rust_features.const_cstr &&
                        CStr::from_bytes_with_nul(&cstr_bytes).is_ok()
                    {
                        // Fix: interpolate `#cstr_ty` (the literal tokens
                        // `cstr_ty` were being emitted before) and borrow it,
                        // so the constant has type `&CStr`.
                        result.push(quote! {
                            #(#attrs)*
                            #[allow(unsafe_code)]
                            pub const #canonical_ident: &#cstr_ty = unsafe {
                                #cstr_ty::from_bytes_with_nul_unchecked(#bytes)
                            };
                        });
                    } else {
                        // With static lifetime elision the `'static` can be
                        // omitted; otherwise the optional lifetime is spliced
                        // into the reference below.
                        let lifetime = if rust_features.static_lifetime_elision
                        {
                            None
                        } else {
                            Some(quote! { 'static })
                        }
                        .into_iter();

                        // Fix: the constant is a *reference* to the byte
                        // array; the leading `&#` was missing, which made
                        // `(#lifetime )*` plain tokens instead of a quote!
                        // repetition and produced invalid output.
                        result.push(quote! {
                            #(#attrs)*
                            pub const #canonical_ident: &#(#lifetime )*#array_ty = #bytes ;
                        });
                    }
                }
                VarType::Float(f) => {
                    if let Ok(expr) = helpers::ast_ty::float_expr(ctx, f) {
                        result.push(quote! {
                            #(#attrs)*
                            pub const #canonical_ident : #ty = #expr ;
                        });
                    }
                }
                VarType::Char(c) => {
                    result.push(quote! {
                        #(#attrs)*
                        pub const #canonical_ident : #ty = #c ;
                    });
                }
            }
        } else {
            // If necessary, apply a `#[link_name]` attribute
            if let Some(link_name) = self.link_name() {
                attrs.push(attributes::link_name::<false>(link_name));
            } else {
                let link_name =
                    self.mangled_name().unwrap_or_else(|| self.name());
                if !utils::names_will_be_identical_after_mangling(
                    &canonical_name,
                    link_name,
                    None,
                ) {
                    attrs.push(attributes::link_name::<false>(link_name));
                }
            }

            let maybe_mut = if self.is_const() {
                quote! {}
            } else {
                quote! { mut }
            };

            let tokens = quote!(
                extern "C" {
                    #(#attrs)*
                    pub static #maybe_mut #canonical_ident: #ty;
                }
            );

            result.push(tokens);
        }
    }
}
impl CodeGenerator for Type {
    type Extra = Item;
    type Return = ();

    /// Generates tokens for a type item. Most kinds need no output of their
    /// own; the interesting cases are block pointers, compound types, enums,
    /// Objective-C interfaces, and (template) aliases.
    fn codegen(
        &self,
        ctx: &BindgenContext,
        result: &mut CodegenResult<'_>,
        item: &Item,
    ) {
        debug!("<Type as CodeGenerator>::codegen: item = {:?}", item);
        debug_assert!(item.is_enabled_for_codegen(ctx));

        match *self.kind() {
            TypeKind::Void |
            TypeKind::NullPtr |
            TypeKind::Int(..) |
            TypeKind::Float(..) |
            TypeKind::Complex(..) |
            TypeKind::Array(..) |
            TypeKind::Vector(..) |
            TypeKind::Pointer(..) |
            TypeKind::Reference(..) |
            TypeKind::Function(..) |
            TypeKind::ResolvedTypeRef(..) |
            TypeKind::Opaque |
            TypeKind::TypeParam => {
                // These items don't need code generation, they only need to be
                // converted to rust types in fields, arguments, and such.
                // NOTE(emilio): If you add to this list, make sure to also add
                // it to BindgenContext::compute_allowlisted_and_codegen_items.
            }
            TypeKind::TemplateInstantiation(ref inst) => {
                inst.codegen(ctx, result, item)
            }
            TypeKind::BlockPointer(inner) => {
                if !ctx.options().generate_block {
                    return;
                }

                let inner_item =
                    inner.into_resolver().through_type_refs().resolve(ctx);
                let name = item.canonical_name(ctx);

                // A block pointer's pointee must be a function signature.
                let inner_rust_type = {
                    if let TypeKind::Function(fnsig) =
                        inner_item.kind().expect_type().kind()
                    {
                        utils::fnsig_block(ctx, fnsig)
                    } else {
                        panic!("invalid block typedef: {:?}", inner_item)
                    }
                };

                let rust_name = ctx.rust_ident(name);

                let mut tokens = if let Some(comment) = item.comment(ctx) {
                    attributes::doc(comment)
                } else {
                    quote! {}
                };

                tokens.append_all(quote! {
                    pub type #rust_name = #inner_rust_type ;
                });

                result.push(tokens);
                result.saw_block();
            }
            TypeKind::Comp(ref ci) => ci.codegen(ctx, result, item),
            TypeKind::TemplateAlias(inner, _) | TypeKind::Alias(inner) => {
                let inner_item =
                    inner.into_resolver().through_type_refs().resolve(ctx);
                let name = item.canonical_name(ctx);
                let path = item.canonical_path(ctx);

                {
                    let through_type_aliases = inner
                        .into_resolver()
                        .through_type_refs()
                        .through_type_aliases()
                        .resolve(ctx);

                    // Try to catch the common pattern:
                    //
                    // typedef struct foo { ... } foo;
                    //
                    // here, and also other more complex cases like #946.
                    if through_type_aliases.canonical_path(ctx) == path {
                        return;
                    }
                }

                // If this is a known named type, disallow generating anything
                // for it too. If size_t -> usize conversions are enabled, we
                // need to check that these conversions are permissible, but
                // nothing needs to be generated, still.
                let spelling = self.name().expect("Unnamed alias?");
                if utils::type_from_named(ctx, spelling).is_some() {
                    if let "size_t" | "ssize_t" = spelling {
                        let layout = inner_item
                            .kind()
                            .expect_type()
                            .layout(ctx)
                            .expect("No layout?");
                        // Fix: this message was a string literal broken in the
                        // middle of the word "target" across two source lines,
                        // which embedded a newline and indentation into the
                        // panic output; it is now a single line, matching the
                        // alignment assertion below.
                        assert_eq!(
                            layout.size,
                            ctx.target_pointer_size(),
                            "Target platform requires `--no-size_t-is-usize`. The size of `{}` ({}) does not match the target pointer size ({})",
                            spelling,
                            layout.size,
                            ctx.target_pointer_size(),
                        );
                        assert_eq!(
                            layout.align,
                            ctx.target_pointer_size(),
                            "Target platform requires `--no-size_t-is-usize`. The alignment of `{}` ({}) does not match the target pointer size ({})",
                            spelling,
                            layout.align,
                            ctx.target_pointer_size(),
                        );
                    }
                    return;
                }

                let mut outer_params = item.used_template_params(ctx);

                let is_opaque = item.is_opaque(ctx, &());
                let inner_rust_type = if is_opaque {
                    outer_params = vec![];
                    self.to_opaque(ctx, item)
                } else {
                    // Its possible that we have better layout information than
                    // the inner type does, so fall back to an opaque blob based
                    // on our layout if converting the inner item fails.
                    inner_item
                        .try_to_rust_ty_or_opaque(ctx, &())
                        .unwrap_or_else(|_| self.to_opaque(ctx, item))
                        .with_implicit_template_params(ctx, inner_item)
                };

                {
                    // FIXME(emilio): This is a workaround to avoid generating
                    // incorrect type aliases because of types that we haven't
                    // been able to resolve (because, eg, they depend on a
                    // template parameter).
                    //
                    // It's kind of a shame not generating them even when they
                    // could be referenced, but we already do the same for items
                    // with invalid template parameters, and at least this way
                    // they can be replaced, instead of generating plain invalid
                    // code.
                    let inner_canon_type =
                        inner_item.expect_type().canonical_type(ctx);
                    if inner_canon_type.is_invalid_type_param() {
                        warn!(
                            "Item contained invalid named type, skipping: \
                             {:?}, {:?}",
                            item, inner_item
                        );
                        return;
                    }
                }

                let rust_name = ctx.rust_ident(&name);

                let mut tokens = if let Some(comment) = item.comment(ctx) {
                    attributes::doc(comment)
                } else {
                    quote! {}
                };

                let alias_style = if ctx.options().type_alias.matches(&name) {
                    AliasVariation::TypeAlias
                } else if ctx.options().new_type_alias.matches(&name) {
                    AliasVariation::NewType
                } else if ctx.options().new_type_alias_deref.matches(&name) {
                    AliasVariation::NewTypeDeref
                } else {
                    ctx.options().default_alias_style
                };

                // We prefer using `pub use` over `pub type` because of:
                // https://github.com/rust-lang/rust/issues/26264
                if matches!(inner_rust_type, syn::Type::Path(_)) &&
                    outer_params.is_empty() &&
                    !is_opaque &&
                    alias_style == AliasVariation::TypeAlias &&
                    inner_item.expect_type().canonical_type(ctx).is_enum()
                {
                    tokens.append_all(quote! {
                        pub use
                    });
                    let path = top_level_path(ctx, item);
                    tokens.append_separated(path, quote!(::));
                    tokens.append_all(quote! {
                        :: #inner_rust_type as #rust_name ;
                    });
                    result.push(tokens);
                    return;
                }

                tokens.append_all(match alias_style {
                    AliasVariation::TypeAlias => quote! {
                        pub type #rust_name
                    },
                    AliasVariation::NewType | AliasVariation::NewTypeDeref => {
                        assert!(
                            ctx.options().rust_features().repr_transparent,
                            "repr_transparent feature is required to use {:?}",
                            alias_style
                        );

                        let mut attributes =
                            vec![attributes::repr("transparent")];
                        let packed = false; // Types can't be packed in Rust.
                        let derivable_traits =
                            derives_of_item(item, ctx, packed);
                        if !derivable_traits.is_empty() {
                            let derives: Vec<_> = derivable_traits.into();
                            attributes.push(attributes::derives(&derives))
                        }

                        quote! {
                            #( #attributes )*
                            pub struct #rust_name
                        }
                    }
                });

                // Carry over any explicit template parameters, bailing out if
                // any of them is invalid.
                let params: Vec<_> = outer_params
                    .into_iter()
                    .filter_map(|p| p.as_template_param(ctx, &()))
                    .collect();
                if params
                    .iter()
                    .any(|p| ctx.resolve_type(*p).is_invalid_type_param())
                {
                    warn!(
                        "Item contained invalid template \
                         parameter: {:?}",
                        item
                    );
                    return;
                }
                let params: Vec<_> = params
                    .iter()
                    .map(|p| {
                        p.try_to_rust_ty(ctx, &()).expect(
                            "type parameters can always convert to rust ty OK",
                        )
                    })
                    .collect();

                if !params.is_empty() {
                    tokens.append_all(quote! {
                        < #( #params ),* >
                    });
                }

                let access_spec =
                    access_specifier(ctx.options().default_visibility);
                tokens.append_all(match alias_style {
                    AliasVariation::TypeAlias => quote! {
                        = #inner_rust_type ;
                    },
                    AliasVariation::NewType | AliasVariation::NewTypeDeref => {
                        quote! {
                            (#access_spec #inner_rust_type) ;
                        }
                    }
                });

                // Newtype-deref aliases additionally get Deref/DerefMut down
                // to the wrapped type.
                if alias_style == AliasVariation::NewTypeDeref {
                    let prefix = ctx.trait_prefix();
                    tokens.append_all(quote! {
                        impl ::#prefix::ops::Deref for #rust_name {
                            type Target = #inner_rust_type;
                            #[inline]
                            fn deref(&self) -> &Self::Target {
                                &self.0
                            }
                        }
                        impl ::#prefix::ops::DerefMut for #rust_name {
                            #[inline]
                            fn deref_mut(&mut self) -> &mut Self::Target {
                                &mut self.0
                            }
                        }
                    });
                }

                result.push(tokens);
            }
            TypeKind::Enum(ref ei) => ei.codegen(ctx, result, item),
            TypeKind::ObjCId | TypeKind::ObjCSel => {
                result.saw_objc();
            }
            TypeKind::ObjCInterface(ref interface) => {
                interface.codegen(ctx, result, item)
            }
            ref u @ TypeKind::UnresolvedTypeRef(..) => {
                unreachable!("Should have been resolved after parsing {:?}!", u)
            }
        }
    }
}
/// Helper for generating the virtual-table struct of a C++ class.
struct Vtable<'a> {
    /// The ID of the item this vtable belongs to.
    item_id: ItemId,
    /// A reference to the originating compound object.
    #[allow(dead_code)]
    comp_info: &'a CompInfo,
}
impl<'a> Vtable<'a> {
fn new(item_id: ItemId, comp_info: &'a CompInfo) -> Self {
Vtable { item_id, comp_info }
}
}
impl<'a> CodeGenerator for Vtable<'a> {
    type Extra = Item;
    type Return = ();

    fn codegen(
        &self,
        ctx: &BindgenContext,
        result: &mut CodegenResult<'_>,
        item: &Item,
    ) {
        assert_eq!(item.id(), self.item_id);
        debug_assert!(item.is_enabled_for_codegen(ctx));
        let name = ctx.rust_ident(self.canonical_name(ctx));

        // For now, we will only generate vtables for classes that:
        // - do not inherit from others (compilers merge VTable from primary parent class).
        // - do not contain a virtual destructor (requires ordering; platforms generate different vtables).
        if ctx.options().vtable_generation &&
            self.comp_info.base_members().is_empty() &&
            self.comp_info.destructor().is_none()
        {
            let class_ident = ctx.rust_ident(self.item_id.canonical_name(ctx));

            // One function-pointer field per virtual method, in declaration
            // order; non-virtual methods are skipped.
            let methods = self
                .comp_info
                .methods()
                .iter()
                .filter_map(|m| {
                    if !m.is_virtual() {
                        return None;
                    }

                    let function_item = ctx.resolve_item(m.signature());
                    let function = function_item.expect_function();
                    let signature_item = ctx.resolve_item(function.signature());
                    let signature = match signature_item.expect_type().kind() {
                        TypeKind::Function(ref sig) => sig,
                        _ => panic!("Function signature type mismatch"),
                    };

                    // FIXME: Is there a canonical name without the class prepended?
                    let function_name = function_item.canonical_name(ctx);

                    // FIXME: Need to account for overloading with times_seen (separately from regular function path).
                    let function_name = ctx.rust_ident(function_name);
                    let mut args = utils::fnsig_arguments(ctx, signature);
                    let ret = utils::fnsig_return_ty(ctx, signature);

                    // Replace the implicit `this` argument with a pointer to
                    // the class itself, const-qualified to match the method.
                    args[0] = if m.is_const() {
                        quote! { this: *const #class_ident }
                    } else {
                        quote! { this: *mut #class_ident }
                    };

                    Some(quote! {
                        pub #function_name : unsafe extern "C" fn( #( #args ),* ) #ret
                    })
                })
                .collect::<Vec<_>>();

            result.push(quote! {
                #[repr(C)]
                pub struct #name {
                    #( #methods ),*
                }
            })
        } else {
            // For the cases we don't support, simply generate an empty struct.
            let void = helpers::ast_ty::c_void(ctx);

            result.push(quote! {
                #[repr(C)]
                pub struct #name ( #void );
            });
        }
    }
}
impl<'a> ItemCanonicalName for Vtable<'a> {
    fn canonical_name(&self, ctx: &BindgenContext) -> String {
        // The vtable struct borrows the owning class's canonical name.
        let base = self.item_id.canonical_name(ctx);
        format!("{}__bindgen_vtable", base)
    }
}
impl<'a> TryToRustTy for Vtable<'a> {
    type Extra = ();

    /// A vtable's Rust type is simply a path to its generated struct name.
    fn try_to_rust_ty(
        &self,
        ctx: &BindgenContext,
        _: &(),
    ) -> error::Result<syn::Type> {
        let name = ctx.rust_ident(self.canonical_name(ctx));
        Ok(syn::parse_quote! { #name })
    }
}
impl CodeGenerator for TemplateInstantiation {
    type Extra = Item;
    type Return = ();

    fn codegen(
        &self,
        ctx: &BindgenContext,
        result: &mut CodegenResult<'_>,
        item: &Item,
    ) {
        debug_assert!(item.is_enabled_for_codegen(ctx));

        // Although uses of instantiations don't need code generation, and are
        // just converted to rust types in fields, vars, etc, we take this
        // opportunity to generate tests for their layout here. If the
        // instantiation is opaque, then its presumably because we don't
        // properly understand it (maybe because of specializations), and so we
        // shouldn't emit layout tests either.
        if !ctx.options().layout_tests || self.is_opaque(ctx, item) {
            return;
        }

        // If there are any unbound type parameters, then we can't generate a
        // layout test because we aren't dealing with a concrete type with a
        // concrete size and alignment.
        if ctx.uses_any_template_parameters(item.id()) {
            return;
        }

        let layout = item.kind().expect_type().layout(ctx);

        if let Some(layout) = layout {
            let size = layout.size;
            let align = layout.align;

            let name = item.full_disambiguated_name(ctx);
            let mut fn_name =
                format!("__bindgen_test_layout_{}_instantiation", name);
            // Multiple instantiations can share a disambiguated name; suffix
            // repeats with an overload counter so test fn names stay unique.
            let times_seen = result.overload_number(&fn_name);
            if times_seen > 0 {
                write!(&mut fn_name, "_{}", times_seen).unwrap();
            }

            let fn_name = ctx.rust_ident_raw(fn_name);

            let prefix = ctx.trait_prefix();
            let ident = item.to_rust_ty_or_opaque(ctx, &());
            let size_of_expr = quote! {
                ::#prefix::mem::size_of::<#ident>()
            };
            let align_of_expr = quote! {
                ::#prefix::mem::align_of::<#ident>()
            };

            // Emit a #[test] asserting the computed layout matches what the
            // Rust compiler produces for the generated type.
            let item = quote! {
                #[test]
                fn #fn_name() {
                    assert_eq!(#size_of_expr, #size,
                               concat!("Size of template specialization: ",
                                       stringify!(#ident)));
                    assert_eq!(#align_of_expr, #align,
                               concat!("Alignment of template specialization: ",
                                       stringify!(#ident)));
                }
            };
            result.push(item);
        }
    }
}
/// Trait for implementing the code generation of a struct or union field.
trait FieldCodegen<'a> {
    /// Extra per-implementation data supplied by the caller.
    type Extra;

    /// Generates this field, pushing its struct member tokens into `fields`
    /// and any accessor methods into `methods`.
    #[allow(clippy::too_many_arguments)]
    fn codegen<F, M>(
        &self,
        ctx: &BindgenContext,
        visibility_kind: FieldVisibilityKind,
        accessor_kind: FieldAccessorKind,
        parent: &CompInfo,
        parent_item: &Item,
        result: &mut CodegenResult,
        struct_layout: &mut StructLayoutTracker,
        fields: &mut F,
        methods: &mut M,
        extra: Self::Extra,
    ) where
        F: Extend<proc_macro2::TokenStream>,
        M: Extend<proc_macro2::TokenStream>;
}
impl<'a> FieldCodegen<'a> for Field {
    type Extra = ();

    fn codegen<F, M>(
        &self,
        ctx: &BindgenContext,
        visibility_kind: FieldVisibilityKind,
        accessor_kind: FieldAccessorKind,
        parent: &CompInfo,
        parent_item: &Item,
        result: &mut CodegenResult,
        struct_layout: &mut StructLayoutTracker,
        fields: &mut F,
        methods: &mut M,
        _: (),
    ) where
        F: Extend<proc_macro2::TokenStream>,
        M: Extend<proc_macro2::TokenStream>,
    {
        // Pure delegation: both arms forward the exact same arguments to the
        // concrete field representation's generator.
        match *self {
            Field::DataMember(ref data) => data.codegen(
                ctx,
                visibility_kind,
                accessor_kind,
                parent,
                parent_item,
                result,
                struct_layout,
                fields,
                methods,
                (),
            ),
            Field::Bitfields(ref unit) => unit.codegen(
                ctx,
                visibility_kind,
                accessor_kind,
                parent,
                parent_item,
                result,
                struct_layout,
                fields,
                methods,
                (),
            ),
        }
    }
}
/// Wraps a union field's type as needed: native Rust unions hold the type
/// directly (or behind `ManuallyDrop` when it can't be trivially copied),
/// while the fallback representation uses bindgen's `__BindgenUnionField`.
fn wrap_union_field_if_needed(
    ctx: &BindgenContext,
    struct_layout: &StructLayoutTracker,
    ty: syn::Type,
    result: &mut CodegenResult,
) -> syn::Type {
    if struct_layout.is_rust_union() {
        if struct_layout.can_copy_union_fields() {
            return ty;
        }
        let prefix = ctx.trait_prefix();
        return syn::parse_quote! { ::#prefix::mem::ManuallyDrop<#ty> };
    }

    // Emulated union: remember to prepend the wrapper type's definition.
    result.saw_bindgen_union();
    if ctx.options().enable_cxx_namespaces {
        syn::parse_quote! { root::__BindgenUnionField<#ty> }
    } else {
        syn::parse_quote! { __BindgenUnionField<#ty> }
    }
}
impl<'a> FieldCodegen<'a> for FieldData {
    type Extra = ();

    /// Generate the struct field declaration (plus any padding field the
    /// layout tracker requires) for a plain, non-bitfield data member, and
    /// optionally getter/setter accessor methods for it.
    fn codegen<F, M>(
        &self,
        ctx: &BindgenContext,
        parent_visibility_kind: FieldVisibilityKind,
        accessor_kind: FieldAccessorKind,
        parent: &CompInfo,
        parent_item: &Item,
        result: &mut CodegenResult,
        struct_layout: &mut StructLayoutTracker,
        fields: &mut F,
        methods: &mut M,
        _: (),
    ) where
        F: Extend<proc_macro2::TokenStream>,
        M: Extend<proc_macro2::TokenStream>,
    {
        // Bitfields are handled by `FieldCodegen` implementations for
        // `BitfieldUnit` and `Bitfield`.
        assert!(self.bitfield_width().is_none());
        // Resolve through type references so we see the field's actual type.
        let field_item =
            self.ty().into_resolver().through_type_refs().resolve(ctx);
        let field_ty = field_item.expect_type();
        let ty = self
            .ty()
            .to_rust_ty_or_opaque(ctx, &())
            .with_implicit_template_params(ctx, field_item);
        // NB: If supported, we use proper `union` types.
        let ty = if parent.is_union() {
            wrap_union_field_if_needed(ctx, struct_layout, ty, result)
        } else if let Some(item) = field_ty.is_incomplete_array(ctx) {
            // Flexible array member (e.g. `char data[];`): emit the
            // `__IncompleteArrayField` helper around the element type.
            result.saw_incomplete_array();
            let inner = item.to_rust_ty_or_opaque(ctx, &());
            if ctx.options().enable_cxx_namespaces {
                syn::parse_quote! { root::__IncompleteArrayField<#inner> }
            } else {
                syn::parse_quote! { __IncompleteArrayField<#inner> }
            }
        } else {
            ty
        };
        // Start with the doc comment (if any), then append the declaration.
        let mut field = quote! {};
        if ctx.options().generate_comments {
            if let Some(raw_comment) = self.comment() {
                let comment = ctx.options().process_comment(raw_comment);
                field = attributes::doc(comment);
            }
        }
        let field_name = self
            .name()
            .map(|name| ctx.rust_mangle(name).into_owned())
            .expect("Each field should have a name in codegen!");
        let field_name = field_name.as_str();
        let field_ident = ctx.rust_ident_raw(field_name);
        // The layout tracker may require explicit padding before this field
        // to reproduce the C/C++ layout.
        if let Some(padding_field) =
            struct_layout.saw_field(field_name, field_ty, self.offset())
        {
            fields.extend(Some(padding_field));
        }
        // Visibility precedence: callback override > annotation > declared
        // C++ access (when respected) > the parent's default.
        let visibility = compute_visibility(
            ctx,
            self.is_public(),
            ctx.options().last_callback(|cb| {
                cb.field_visibility(FieldInfo {
                    type_name: &parent_item.canonical_name(ctx),
                    field_name,
                })
            }),
            self.annotations(),
            parent_visibility_kind,
        );
        // Per-field annotations may override the accessor kind inherited
        // from the parent.
        let accessor_kind =
            self.annotations().accessor_kind().unwrap_or(accessor_kind);
        match visibility {
            FieldVisibilityKind::Private => {
                field.append_all(quote! {
                    #field_ident : #ty ,
                });
            }
            FieldVisibilityKind::PublicCrate => {
                field.append_all(quote! {
                    pub(crate) #field_ident : #ty ,
                });
            }
            FieldVisibilityKind::Public => {
                field.append_all(quote! {
                    pub #field_ident : #ty ,
                });
            }
        }
        fields.extend(Some(field));
        // TODO: Factor the following code out, please!
        if accessor_kind == FieldAccessorKind::None {
            return;
        }
        let getter_name = ctx.rust_ident_raw(format!("get_{}", field_name));
        let mutable_getter_name =
            ctx.rust_ident_raw(format!("get_{}_mut", field_name));
        // Emit accessors: Regular = safe get/get_mut, Unsafe = the same pair
        // marked `unsafe`, Immutable = safe getter only.
        methods.extend(Some(match accessor_kind {
            // Handled by the early return above.
            FieldAccessorKind::None => unreachable!(),
            FieldAccessorKind::Regular => {
                quote! {
                    #[inline]
                    pub fn #getter_name(&self) -> & #ty {
                        &self.#field_ident
                    }
                    #[inline]
                    pub fn #mutable_getter_name(&mut self) -> &mut #ty {
                        &mut self.#field_ident
                    }
                }
            }
            FieldAccessorKind::Unsafe => {
                quote! {
                    #[inline]
                    pub unsafe fn #getter_name(&self) -> & #ty {
                        &self.#field_ident
                    }
                    #[inline]
                    pub unsafe fn #mutable_getter_name(&mut self) -> &mut #ty {
                        &mut self.#field_ident
                    }
                }
            }
            FieldAccessorKind::Immutable => {
                quote! {
                    #[inline]
                    pub fn #getter_name(&self) -> & #ty {
                        &self.#field_ident
                    }
                }
            }
        }));
    }
}
impl BitfieldUnit {
    /// Get the constructor name for this bitfield unit.
    ///
    /// Units are numbered within their parent, so the name is
    /// `new_bitfield_<n>`.
    fn ctor_name(&self) -> proc_macro2::TokenStream {
        let name = format!("new_bitfield_{}", self.nth());
        let ident = Ident::new(&name, Span::call_site());
        quote! { #ident }
    }
}
impl Bitfield {
    /// Extend an under construction bitfield unit constructor with this
    /// bitfield. This sets the relevant bits on the `__bindgen_bitfield_unit`
    /// variable that's being constructed.
    fn extend_ctor_impl(
        &self,
        ctx: &BindgenContext,
        param_name: proc_macro2::TokenStream,
        mut ctor_impl: proc_macro2::TokenStream,
    ) -> proc_macro2::TokenStream {
        // The bitfield's storage integer type; callers have already checked
        // that such a type exists before deciding to generate a constructor.
        let layout = ctx
            .resolve_type(self.ty())
            .layout(ctx)
            .expect("Bitfield without layout? Gah!");
        let int_ty = helpers::integer_type(ctx, layout).expect(
            "Should already have verified that the bitfield is \
             representable as an int",
        );
        let offset = self.offset_into_unit();
        let width = self.width() as u8;
        let prefix = ctx.trait_prefix();
        // Transmute the (possibly enum-typed) parameter to its underlying
        // integer and store it into the unit's bit range.
        ctor_impl.append_all(quote! {
            __bindgen_bitfield_unit.set(
                #offset,
                #width,
                {
                    let #param_name: #int_ty = unsafe {
                        ::#prefix::mem::transmute(#param_name)
                    };
                    #param_name as u64
                }
            );
        });
        ctor_impl
    }
}
/// Map a visibility kind to the tokens of the corresponding Rust access
/// specifier (`pub`, `pub(crate)`, or nothing for private).
fn access_specifier(
    visibility: FieldVisibilityKind,
) -> proc_macro2::TokenStream {
    match visibility {
        FieldVisibilityKind::Public => quote! { pub },
        FieldVisibilityKind::PublicCrate => quote! { pub(crate) },
        // Private fields carry no specifier at all.
        FieldVisibilityKind::Private => proc_macro2::TokenStream::new(),
    }
}
/// Compute a fields or structs visibility based on multiple conditions.
/// 1. If the element was declared public, and we respect such CXX accesses specs
/// (context option) => By default Public, but this can be overruled by an `annotation`.
///
/// 2. If the element was declared private, and we respect such CXX accesses specs
/// (context option) => By default Private, but this can be overruled by an `annotation`.
///
/// 3. If we do not respect visibility modifiers, the result depends on the `annotation`,
/// if any, or the passed `default_kind`.
///
fn compute_visibility(
    ctx: &BindgenContext,
    is_declared_public: bool,
    callback_override: Option<FieldVisibilityKind>,
    annotations: &Annotations,
    default_kind: FieldVisibilityKind,
) -> FieldVisibilityKind {
    // Highest precedence: an explicit callback decision.
    if let Some(kind) = callback_override {
        return kind;
    }
    // Next: an annotation attached to the element itself.
    if let Some(kind) = annotations.visibility_kind() {
        return kind;
    }
    // When C++ access specifiers are not respected, the declaration does not
    // matter and the caller-provided default wins.
    if !ctx.options().respect_cxx_access_specs {
        return default_kind;
    }
    // Otherwise mirror the declared C++ access.
    if is_declared_public {
        FieldVisibilityKind::Public
    } else {
        FieldVisibilityKind::Private
    }
}
impl<'a> FieldCodegen<'a> for BitfieldUnit {
    type Extra = ();

    /// Generate the storage field for a whole bitfield unit: a zero-sized
    /// alignment field, the opaque storage field itself, per-bitfield
    /// accessors (delegated to `Bitfield::codegen`), and — when possible — a
    /// `new_bitfield_N` constructor that packs all named bitfields.
    fn codegen<F, M>(
        &self,
        ctx: &BindgenContext,
        visibility_kind: FieldVisibilityKind,
        accessor_kind: FieldAccessorKind,
        parent: &CompInfo,
        parent_item: &Item,
        result: &mut CodegenResult,
        struct_layout: &mut StructLayoutTracker,
        fields: &mut F,
        methods: &mut M,
        _: (),
    ) where
        F: Extend<proc_macro2::TokenStream>,
        M: Extend<proc_macro2::TokenStream>,
    {
        use crate::ir::ty::RUST_DERIVE_IN_ARRAY_LIMIT;
        result.saw_bitfield_unit();
        let layout = self.layout();
        let unit_field_ty = helpers::bitfield_unit(ctx, layout);
        // Union members need the same wrapping treatment as ordinary fields.
        let field_ty = {
            let unit_field_ty = unit_field_ty.clone();
            if parent.is_union() {
                wrap_union_field_if_needed(
                    ctx,
                    struct_layout,
                    unit_field_ty,
                    result,
                )
            } else {
                unit_field_ty
            }
        };
        // Emit a `[uN; 0]` marker field that forces the unit's alignment
        // without occupying any space.
        {
            let align_field_name = format!("_bitfield_align_{}", self.nth());
            let align_field_ident = ctx.rust_ident(align_field_name);
            let align_ty = match self.layout().align {
                n if n >= 8 => quote! { u64 },
                4 => quote! { u32 },
                2 => quote! { u16 },
                _ => quote! { u8 },
            };
            let access_spec = access_specifier(visibility_kind);
            let align_field = quote! {
                #access_spec #align_field_ident: [#align_ty; 0],
            };
            fields.extend(Some(align_field));
        }
        let unit_field_name = format!("_bitfield_{}", self.nth());
        let unit_field_ident = ctx.rust_ident(&unit_field_name);
        let ctor_name = self.ctor_name();
        let mut ctor_params = vec![];
        let mut ctor_impl = quote! {};
        // We cannot generate any constructor if the underlying storage can't
        // implement AsRef<[u8]> / AsMut<[u8]> / etc, or can't derive Default.
        //
        // We don't check `larger_arrays` here because Default does still have
        // the 32 items limitation.
        let mut generate_ctor = layout.size <= RUST_DERIVE_IN_ARRAY_LIMIT;
        // The unit's own visibility is narrowed to the most restrictive
        // visibility of any bitfield it contains.
        let mut unit_visibility = visibility_kind;
        for bf in self.bitfields() {
            // Codegen not allowed for anonymous bitfields
            if bf.name().is_none() {
                continue;
            }
            // Skip bitfields whose storage exceeds the derive limit unless
            // the target Rust supports larger arrays.
            if layout.size > RUST_DERIVE_IN_ARRAY_LIMIT &&
                !ctx.options().rust_features().larger_arrays
            {
                continue;
            }
            // Out-parameters filled in by `Bitfield::codegen` via `Extra`.
            let mut bitfield_representable_as_int = true;
            let mut bitfield_visibility = visibility_kind;
            bf.codegen(
                ctx,
                visibility_kind,
                accessor_kind,
                parent,
                parent_item,
                result,
                struct_layout,
                fields,
                methods,
                (
                    &unit_field_name,
                    &mut bitfield_representable_as_int,
                    &mut bitfield_visibility,
                ),
            );
            if bitfield_visibility < unit_visibility {
                unit_visibility = bitfield_visibility;
            }
            // Generating a constructor requires the bitfield to be representable as an integer.
            if !bitfield_representable_as_int {
                generate_ctor = false;
                continue;
            }
            let param_name = bitfield_getter_name(ctx, bf);
            let bitfield_ty_item = ctx.resolve_item(bf.ty());
            let bitfield_ty = bitfield_ty_item.expect_type();
            let bitfield_ty =
                bitfield_ty.to_rust_ty_or_opaque(ctx, bitfield_ty_item);
            ctor_params.push(quote! {
                #param_name : #bitfield_ty
            });
            ctor_impl = bf.extend_ctor_impl(ctx, param_name, ctor_impl);
        }
        let access_spec = access_specifier(unit_visibility);
        let field = quote! {
            #access_spec #unit_field_ident : #field_ty ,
        };
        fields.extend(Some(field));
        if generate_ctor {
            methods.extend(Some(quote! {
                #[inline]
                #access_spec fn #ctor_name ( #( #ctor_params ),* ) -> #unit_field_ty {
                    let mut __bindgen_bitfield_unit: #unit_field_ty = Default::default();
                    #ctor_impl
                    __bindgen_bitfield_unit
                }
            }));
        }
        // Tell the layout tracker the unit's bytes have been accounted for.
        struct_layout.saw_bitfield_unit(layout);
    }
}
/// Tokens for the (possibly mangled) getter-method identifier of `bitfield`.
fn bitfield_getter_name(
    ctx: &BindgenContext,
    bitfield: &Bitfield,
) -> proc_macro2::TokenStream {
    let ident = ctx.rust_ident_raw(bitfield.getter_name());
    quote! { #ident }
}
/// Tokens for the (possibly mangled) setter-method identifier of `bitfield`.
fn bitfield_setter_name(
    ctx: &BindgenContext,
    bitfield: &Bitfield,
) -> proc_macro2::TokenStream {
    let ident = ctx.rust_ident_raw(bitfield.setter_name());
    quote! { #ident }
}
impl<'a> FieldCodegen<'a> for Bitfield {
    // Extra inputs/outputs threaded in by the enclosing `BitfieldUnit`
    // codegen: the unit's storage field name, plus two out-parameters
    // reporting whether this bitfield fits an integer type and what
    // visibility it ended up with.
    type Extra = (&'a str, &'a mut bool, &'a mut FieldVisibilityKind);

    /// Generate getter/setter accessor methods for a single named bitfield
    /// inside its unit. Emits no struct field itself — the storage belongs to
    /// the `BitfieldUnit`.
    fn codegen<F, M>(
        &self,
        ctx: &BindgenContext,
        visibility_kind: FieldVisibilityKind,
        _accessor_kind: FieldAccessorKind,
        parent: &CompInfo,
        parent_item: &Item,
        _result: &mut CodegenResult,
        struct_layout: &mut StructLayoutTracker,
        _fields: &mut F,
        methods: &mut M,
        (unit_field_name, bitfield_representable_as_int, bitfield_visibility): (
            &'a str,
            &mut bool,
            &'a mut FieldVisibilityKind,
        ),
    ) where
        F: Extend<proc_macro2::TokenStream>,
        M: Extend<proc_macro2::TokenStream>,
    {
        let prefix = ctx.trait_prefix();
        let getter_name = bitfield_getter_name(ctx, self);
        let setter_name = bitfield_setter_name(ctx, self);
        let unit_field_ident = Ident::new(unit_field_name, Span::call_site());
        let bitfield_ty_item = ctx.resolve_item(self.ty());
        let bitfield_ty = bitfield_ty_item.expect_type();
        let bitfield_ty_layout = bitfield_ty
            .layout(ctx)
            .expect("Bitfield without layout? Gah!");
        // If the bitfield's layout has no matching Rust integer type, report
        // that to the caller (it disables ctor generation) and bail out.
        let bitfield_int_ty =
            match helpers::integer_type(ctx, bitfield_ty_layout) {
                Some(int_ty) => {
                    *bitfield_representable_as_int = true;
                    int_ty
                }
                None => {
                    *bitfield_representable_as_int = false;
                    return;
                }
            };
        let bitfield_ty =
            bitfield_ty.to_rust_ty_or_opaque(ctx, bitfield_ty_item);
        let offset = self.offset_into_unit();
        let width = self.width() as u8;
        let override_visibility = self.name().and_then(|field_name| {
            ctx.options().last_callback(|cb| {
                cb.field_visibility(FieldInfo {
                    type_name: &parent_item.canonical_name(ctx),
                    field_name,
                })
            })
        });
        // Report the resolved visibility back to the unit, which narrows its
        // own visibility to the most restrictive member.
        *bitfield_visibility = compute_visibility(
            ctx,
            self.is_public(),
            override_visibility,
            self.annotations(),
            visibility_kind,
        );
        let access_spec = access_specifier(*bitfield_visibility);
        // Emulated unions wrap the storage, so the accessors must first go
        // through `as_ref()`/`as_mut()`; real structs/unions access the unit
        // field directly. The generated bodies are otherwise identical:
        // transmute between the bitfield's Rust type and its storage integer,
        // then get/set the bit range in the unit.
        if parent.is_union() && !struct_layout.is_rust_union() {
            methods.extend(Some(quote! {
                #[inline]
                #access_spec fn #getter_name(&self) -> #bitfield_ty {
                    unsafe {
                        ::#prefix::mem::transmute(
                            self.#unit_field_ident.as_ref().get(#offset, #width)
                                as #bitfield_int_ty
                        )
                    }
                }
                #[inline]
                #access_spec fn #setter_name(&mut self, val: #bitfield_ty) {
                    unsafe {
                        let val: #bitfield_int_ty = ::#prefix::mem::transmute(val);
                        self.#unit_field_ident.as_mut().set(
                            #offset,
                            #width,
                            val as u64
                        )
                    }
                }
            }));
        } else {
            methods.extend(Some(quote! {
                #[inline]
                #access_spec fn #getter_name(&self) -> #bitfield_ty {
                    unsafe {
                        ::#prefix::mem::transmute(
                            self.#unit_field_ident.get(#offset, #width)
                                as #bitfield_int_ty
                        )
                    }
                }
                #[inline]
                #access_spec fn #setter_name(&mut self, val: #bitfield_ty) {
                    unsafe {
                        let val: #bitfield_int_ty = ::#prefix::mem::transmute(val);
                        self.#unit_field_ident.set(
                            #offset,
                            #width,
                            val as u64
                        )
                    }
                }
            }));
        }
    }
}
impl CodeGenerator for CompInfo {
type Extra = Item;
type Return = ();
fn codegen(
&self,
ctx: &BindgenContext,
result: &mut CodegenResult<'_>,
item: &Item,
) {
debug!("<CompInfo as CodeGenerator>::codegen: item = {:?}", item);
debug_assert!(item.is_enabled_for_codegen(ctx));
// Don't output classes with template parameters that aren't types, and
// also don't output template specializations, neither total or partial.
if self.has_non_type_template_params() {
return;
}
let ty = item.expect_type();
let layout = ty.layout(ctx);
let mut packed = self.is_packed(ctx, layout.as_ref());
let canonical_name = item.canonical_name(ctx);
let canonical_ident = ctx.rust_ident(&canonical_name);
// Generate the vtable from the method list if appropriate.
//
// TODO: I don't know how this could play with virtual methods that are
// not in the list of methods found by us, we'll see. Also, could the
// order of the vtable pointers vary?
//
// FIXME: Once we generate proper vtables, we need to codegen the
// vtable, but *not* generate a field for it in the case that
// HasVtable::has_vtable_ptr is false but HasVtable::has_vtable is true.
//
--> --------------------
--> maximum size reached
--> --------------------