Anforderungen  |   Konzepte  |   Entwurf  |   Entwicklung  |   Qualitätssicherung  |   Lebenszyklus  |   Steuerung
 
 
 
 


Quelle  bindings.rs   Sprache: unbekannt

 
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#![allow(clippy::missing_safety_doc)]
#![allow(clippy::not_unsafe_ptr_arg_deref)]

use gleam::gl;
use std::cell::RefCell;
#[cfg(not(any(target_os = "macos", target_os = "ios")))]
use std::ffi::OsString;
use std::ffi::{CStr, CString};
use std::io::Cursor;
use std::marker::PhantomData;
use std::ops::Range;
#[cfg(target_os = "android")]
use std::os::raw::c_int;
use std::os::raw::{c_char, c_float, c_void};
#[cfg(not(any(target_os = "macos", target_os = "ios", target_os = "windows")))]
use std::os::unix::ffi::OsStringExt;
#[cfg(target_os = "windows")]
use std::os::windows::ffi::OsStringExt;
use std::path::PathBuf;
use std::rc::Rc;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::time::Duration;
use std::{env, mem, ptr, slice};
use thin_vec::ThinVec;
use webrender::glyph_rasterizer::GlyphRasterThread;
use webrender::ChunkPool;

use euclid::SideOffsets2D;
use moz2d_renderer::Moz2dBlobImageHandler;
use nsstring::nsAString;
use program_cache::{remove_disk_cache, WrProgramCache};
use tracy_rs::register_thread_with_profiler;
use webrender::sw_compositor::SwCompositor;
use webrender::{
    api::units::*, api::*, create_webrender_instance, render_api::*, set_profiler_hooks, AsyncPropertySampler,
    AsyncScreenshotHandle, Compositor, LayerCompositor, CompositorCapabilities, CompositorConfig, CompositorSurfaceTransform, Device,
    MappableCompositor, MappedTileInfo, NativeSurfaceId, NativeSurfaceInfo, NativeTileId, PartialPresentCompositor,
    PipelineInfo, ProfilerHooks, RecordedFrameHandle, RenderBackendHooks, Renderer, RendererStats,
    SWGLCompositeSurfaceInfo, SceneBuilderHooks, ShaderPrecacheFlags, Shaders, SharedShaders, TextureCacheConfig,
    UploadMethod, WebRenderOptions, WindowVisibility, ONE_TIME_USAGE_HINT, CompositorInputConfig, CompositorSurfaceUsage,
};
use wr_malloc_size_of::MallocSizeOfOps;

// Android-only logging hook: routes messages to logcat from this glue layer.
extern "C" {
    #[cfg(target_os = "android")]
    fn __android_log_write(prio: c_int, tag: *const c_char, text: *const c_char) -> c_int;
}

/// The unique id for WR resource identification.
/// Starts at 1 and is bumped by `next_namespace_id` for every new namespace.
static NEXT_NAMESPACE_ID: AtomicUsize = AtomicUsize::new(1);

/// Special value handled in this wrapper layer to signify a redundant clip chain.
/// All bits set — never produced as a real clip chain id.
pub const ROOT_CLIP_CHAIN: u64 = !0;

/// Allocates the next process-unique `IdNamespace` from the shared counter.
fn next_namespace_id() -> IdNamespace {
    let raw = NEXT_NAMESPACE_ID.fetch_add(1, Ordering::Relaxed);
    IdNamespace(raw as u32)
}

/// Whether a border should be antialiased.
/// Whether a border should be antialiased.
/// `repr(C)` keeps the discriminant layout stable for the C++ side.
#[repr(C)]
#[derive(Eq, PartialEq, Copy, Clone)]
pub enum AntialiasBorder {
    No = 0,
    Yes,
}

/// Used to indicate if an image is opaque, or has an alpha channel.
/// Used to indicate if an image is opaque, or has an alpha channel.
/// Transferred over FFI as a single byte (`repr(u8)`).
#[repr(u8)]
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum OpacityType {
    Opaque = 0,
    HasAlphaChannel = 1,
}

// FFI-visible aliases for WebRender id/key types. The `cbindgen:` doc
// annotations below drive the generated C++ field names and derived
// comparison operators — keep them exactly as written.
/// cbindgen:field-names=[mHandle]
/// cbindgen:derive-lt=true
/// cbindgen:derive-lte=true
/// cbindgen:derive-neq=true
type WrEpoch = Epoch;
/// cbindgen:field-names=[mHandle]
/// cbindgen:derive-lt=true
/// cbindgen:derive-lte=true
/// cbindgen:derive-neq=true
pub type WrIdNamespace = IdNamespace;

/// cbindgen:field-names=[mNamespace, mHandle]
type WrDocumentId = DocumentId;
/// cbindgen:field-names=[mNamespace, mHandle]
type WrPipelineId = PipelineId;
/// cbindgen:field-names=[mNamespace, mHandle]
/// cbindgen:derive-neq=true
type WrImageKey = ImageKey;
/// cbindgen:field-names=[mNamespace, mHandle]
pub type WrFontKey = FontKey;
/// cbindgen:field-names=[mNamespace, mHandle]
pub type WrFontInstanceKey = FontInstanceKey;
/// cbindgen:field-names=[mNamespace, mHandle]
type WrYuvColorSpace = YuvColorSpace;
/// cbindgen:field-names=[mNamespace, mHandle]
type WrColorDepth = ColorDepth;
/// cbindgen:field-names=[mNamespace, mHandle]
type WrColorRange = ColorRange;

/// Converts this wrapper layer's `u64` clip chain id into WebRender's
/// `ClipChainId`, mapping the `ROOT_CLIP_CHAIN` sentinel to `INVALID`.
#[inline]
fn clip_chain_id_to_webrender(id: u64, pipeline_id: WrPipelineId) -> ClipChainId {
    match id {
        ROOT_CLIP_CHAIN => ClipChainId::INVALID,
        other => ClipChainId(other, pipeline_id),
    }
}

/// FFI pair of a spatial node and a clip chain id, as built on the C++ side.
/// `clip_chain` may carry the `ROOT_CLIP_CHAIN` sentinel.
#[repr(C)]
pub struct WrSpaceAndClipChain {
    space: WrSpatialId,
    clip_chain: u64,
}

impl WrSpaceAndClipChain {
    /// Translates this FFI pair into WebRender's `SpaceAndClipInfo`.
    /// Special case: the dummy `ROOT_CLIP_CHAIN` value is mapped to
    /// `ClipChainId::INVALID` by the sentinel-aware helper.
    fn to_webrender(&self, pipeline_id: WrPipelineId) -> SpaceAndClipInfo {
        let spatial_id = self.space.to_webrender(pipeline_id);
        let clip_chain_id = clip_chain_id_to_webrender(self.clip_chain, pipeline_id);
        SpaceAndClipInfo {
            spatial_id,
            clip_chain_id,
        }
    }
}

/// Clip selection for a stacking context: either no clip at all, or a clip
/// chain id (possibly the `ROOT_CLIP_CHAIN` sentinel).
#[repr(C)]
pub enum WrStackingContextClip {
    None,
    ClipChain(u64),
}

impl WrStackingContextClip {
    /// Resolves to a concrete `ClipChainId`; both the `None` variant and the
    /// `ROOT_CLIP_CHAIN` sentinel yield `None`.
    fn to_webrender(&self, pipeline_id: WrPipelineId) -> Option<ClipChainId> {
        match *self {
            WrStackingContextClip::None => None,
            WrStackingContextClip::ClipChain(ROOT_CLIP_CHAIN) => None,
            WrStackingContextClip::ClipChain(id) => Some(ClipChainId(id, pipeline_id)),
        }
    }
}

/// Builds a slice view over `ptr`/`len`, mapping a null pointer to the empty
/// slice so C callers may pass (NULL, 0).
///
/// # Safety
/// When `ptr` is non-null it must point to `len` consecutive initialized `T`s
/// that remain valid for the caller-chosen lifetime `'a`.
unsafe fn make_slice<'a, T>(ptr: *const T, len: usize) -> &'a [T] {
    if ptr.is_null() {
        return &[];
    }
    slice::from_raw_parts(ptr, len)
}

/// Mutable counterpart of `make_slice`: a null pointer yields the empty slice.
///
/// # Safety
/// When `ptr` is non-null it must point to `len` consecutive initialized `T`s,
/// valid and uniquely borrowed for the caller-chosen lifetime `'a`.
unsafe fn make_slice_mut<'a, T>(ptr: *mut T, len: usize) -> &'a mut [T] {
    if ptr.is_null() {
        return &mut [];
    }
    slice::from_raw_parts_mut(ptr, len)
}

/// Owns a `RenderApi` together with one WebRender document created on it,
/// plus the (lazily resolved) hit tester for that document.
pub struct DocumentHandle {
    api: RenderApi,
    document_id: DocumentId,
    // One of the two options below is Some and the other None at all times.
    // It would be nice to model with an enum, however it is tricky to express
    // moving a variant's content into another variant without moving the
    // containing enum.
    hit_tester_request: Option<HitTesterRequest>,
    hit_tester: Option<Arc<dyn ApiHitTester>>,
}

impl DocumentHandle {
    /// Creates a document of the given size on `api`, optionally sharing an
    /// already-resolved hit tester supplied by the caller.
    pub fn new(
        api: RenderApi,
        hit_tester: Option<Arc<dyn ApiHitTester>>,
        size: DeviceIntSize,
        id: u32,
    ) -> DocumentHandle {
        let document_id = api.add_document_with_id(size, id);
        // When no hit tester was handed in, kick off the request right away to
        // reduce the likelihood of blocking on the first hit testing query.
        let hit_tester_request = if hit_tester.is_some() {
            None
        } else {
            Some(api.request_hit_tester(document_id))
        };

        DocumentHandle {
            api,
            document_id: document_id,
            hit_tester_request,
            hit_tester,
        }
    }

    /// Returns the hit tester, resolving the pending request the first time
    /// it is needed.
    fn ensure_hit_tester(&mut self) -> &Arc<dyn ApiHitTester> {
        if self.hit_tester.is_none() {
            // Invariant: exactly one of hit_tester / hit_tester_request is
            // Some, so take().unwrap() cannot fail here.
            let resolved = self.hit_tester_request.take().unwrap().resolve();
            self.hit_tester = Some(resolved);
        }
        self.hit_tester.as_ref().unwrap()
    }
}

#[repr(C)]
pub struct WrVecU8 {
    /// `data` must always be valid for passing to Vec::from_raw_parts.
    /// In particular, it must be non-null even if capacity is zero.
    data: *mut u8,
    length: usize,
    capacity: usize,
}

impl WrVecU8 {
    /// Consumes the wrapper, returning ownership of the bytes as a `Vec<u8>`.
    fn into_vec(mut self) -> Vec<u8> {
        // Clear self and then drop self.
        self.flush_into_vec()
    }

    // Clears self without consuming self.
    // Afterwards `self` holds a dangling-but-aligned pointer with zero
    // length/capacity, which still satisfies the struct invariant.
    fn flush_into_vec(&mut self) -> Vec<u8> {
        // Create a Vec using Vec::from_raw_parts.
        //
        // Here are the safety requirements, verbatim from the documentation of `from_raw_parts`:
        //
        // > * `ptr` must have been allocated using the global allocator, such as via
        // >   the [`alloc::alloc`] function.
        // > * `T` needs to have the same alignment as what `ptr` was allocated with.
        // >   (`T` having a less strict alignment is not sufficient, the alignment really
        // >   needs to be equal to satisfy the [`dealloc`] requirement that memory must be
        // >   allocated and deallocated with the same layout.)
        // > * The size of `T` times the `capacity` (ie. the allocated size in bytes) needs
        // >   to be the same size as the pointer was allocated with. (Because similar to
        // >   alignment, [`dealloc`] must be called with the same layout `size`.)
        // > * `length` needs to be less than or equal to `capacity`.
        // > * The first `length` values must be properly initialized values of type `T`.
        // > * `capacity` needs to be the capacity that the pointer was allocated with.
        // > * The allocated size in bytes must be no larger than `isize::MAX`.
        // >   See the safety documentation of [`pointer::offset`].
        //
        // These comments don't say what to do for zero-capacity vecs which don't have
        // an allocation. In particular, the requirement "`ptr` must have been allocated"
        // is not met for such vecs.
        //
        // However, the safety requirements of `slice::from_raw_parts` are more explicit
        // about the empty case:
        //
        // > * `data` must be non-null and aligned even for zero-length slices. One
        // >   reason for this is that enum layout optimizations may rely on references
        // >   (including slices of any length) being aligned and non-null to distinguish
        // >   them from other data. You can obtain a pointer that is usable as `data`
        // >   for zero-length slices using [`NonNull::dangling()`].
        //
        // For the empty case we follow this requirement rather than the more stringent
        // requirement from the `Vec::from_raw_parts` docs.
        let vec = unsafe { Vec::from_raw_parts(self.data, self.length, self.capacity) };
        self.data = ptr::NonNull::dangling().as_ptr();
        self.length = 0;
        self.capacity = 0;
        vec
    }

    /// Borrows the first `length` bytes as a slice.
    pub fn as_slice(&self) -> &[u8] {
        // Sound because `data` is non-null and aligned even when empty
        // (see the field invariant on the struct).
        unsafe { core::slice::from_raw_parts(self.data, self.length) }
    }

    /// Takes ownership of `v`'s heap buffer; a later `flush_into_vec` /
    /// `into_vec` hands it back to Rust for deallocation.
    fn from_vec(mut v: Vec<u8>) -> WrVecU8 {
        let w = WrVecU8 {
            data: v.as_mut_ptr(),
            length: v.len(),
            capacity: v.capacity(),
        };
        mem::forget(v);
        w
    }

    /// Reserves room for at least `len` additional bytes.
    fn reserve(&mut self, len: usize) {
        let mut vec = self.flush_into_vec();
        vec.reserve(len);
        *self = Self::from_vec(vec);
    }

    /// Appends `bytes`, reallocating if necessary.
    fn push_bytes(&mut self, bytes: &[u8]) {
        let mut vec = self.flush_into_vec();
        vec.extend_from_slice(bytes);
        *self = Self::from_vec(vec);
    }
}

/// C entry point: appends `bytes` to `v`.
#[no_mangle]
pub extern "C" fn wr_vec_u8_push_bytes(v: &mut WrVecU8, bytes: ByteSlice) {
    v.push_bytes(bytes.as_slice());
}

/// C entry point: reserves room for at least `len` additional bytes in `v`.
#[no_mangle]
pub extern "C" fn wr_vec_u8_reserve(v: &mut WrVecU8, len: usize) {
    v.reserve(len);
}

/// C entry point: frees the buffer by converting it back into a `Vec` and
/// dropping it on the Rust side.
#[no_mangle]
pub extern "C" fn wr_vec_u8_free(v: WrVecU8) {
    v.into_vec();
}

/// Borrowed, read-only byte span for FFI; the phantom lifetime ties it to the
/// slice it was created from.
#[repr(C)]
pub struct ByteSlice<'a> {
    buffer: *const u8,
    len: usize,
    _phantom: PhantomData<&'a ()>,
}

impl<'a> ByteSlice<'a> {
    /// Captures `slice` as a raw pointer + length pair for FFI transport.
    pub fn new(slice: &'a [u8]) -> ByteSlice<'a> {
        let buffer = slice.as_ptr();
        let len = slice.len();
        ByteSlice {
            buffer,
            len,
            _phantom: PhantomData,
        }
    }

    /// Reconstitutes the borrowed byte slice (a null buffer maps to empty).
    pub fn as_slice(&self) -> &'a [u8] {
        // Sound by the invariant from `new`: buffer/len describe a live
        // &'a [u8] (or are null/ignored).
        unsafe { make_slice(self.buffer, self.len) }
    }
}

/// Borrowed, writable byte span for FFI; mutable counterpart of `ByteSlice`.
#[repr(C)]
pub struct MutByteSlice<'a> {
    buffer: *mut u8,
    len: usize,
    _phantom: PhantomData<&'a ()>,
}

impl<'a> MutByteSlice<'a> {
    /// Captures `slice` as a raw mutable pointer + length pair.
    pub fn new(slice: &'a mut [u8]) -> MutByteSlice<'a> {
        let len = slice.len();
        let buffer = slice.as_mut_ptr();
        MutByteSlice {
            buffer,
            len,
            _phantom: PhantomData,
        }
    }

    /// Reconstitutes the borrowed mutable slice (a null buffer maps to empty).
    pub fn as_mut_slice(&mut self) -> &'a mut [u8] {
        // Sound by the invariant from `new`.
        unsafe { make_slice_mut(self.buffer, self.len) }
    }
}

/// Compact FFI mirror of WebRender's `ImageDescriptor`.
/// A `stride` of 0 means "tightly packed" (see the From impl below).
#[repr(C)]
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct WrImageDescriptor {
    pub format: ImageFormat,
    pub width: i32,
    pub height: i32,
    pub stride: i32,
    pub opacity: OpacityType,
    // TODO(gw): Remove this flag (use prim flags instead).
    pub prefer_compositor_surface: bool,
}

impl<'a> From<&'a WrImageDescriptor> for ImageDescriptor {
    /// Expands the compact FFI descriptor into WebRender's `ImageDescriptor`.
    fn from(desc: &'a WrImageDescriptor) -> ImageDescriptor {
        // Only the opacity bit is carried over into descriptor flags here.
        let flags = if desc.opacity == OpacityType::Opaque {
            ImageDescriptorFlags::IS_OPAQUE
        } else {
            ImageDescriptorFlags::empty()
        };
        // Zero stride means "tightly packed"; WebRender expects None for that.
        let stride = if desc.stride == 0 { None } else { Some(desc.stride) };

        ImageDescriptor {
            size: DeviceIntSize::new(desc.width, desc.height),
            stride,
            format: desc.format,
            offset: 0,
            flags,
        }
    }
}

/// Discriminates how an external image's pixels are delivered over FFI
/// (raw CPU buffer vs. GL texture handle).
#[repr(u32)]
#[allow(dead_code)]
enum WrExternalImageType {
    RawData,
    NativeTexture,
    Invalid,
}

/// FFI payload returned by the C++ lock callback: either a texture handle
/// plus UV rect, or a raw buffer pointer + size, depending on `image_type`.
#[repr(C)]
struct WrExternalImage {
    image_type: WrExternalImageType,

    // external texture handle
    handle: u32,
    // external texture coordinate
    u0: f32,
    v0: f32,
    u1: f32,
    v1: f32,

    // external image buffer
    buff: *const u8,
    size: usize,
}

// C++ callbacks that lock/unlock an external image's pixels for the duration
// of a frame; paired calls are driven by `WrExternalImageHandler` below.
extern "C" {
    fn wr_renderer_lock_external_image(
        renderer: *mut c_void,
        external_image_id: ExternalImageId,
        channel_index: u8,
    ) -> WrExternalImage;
    fn wr_renderer_unlock_external_image(renderer: *mut c_void, external_image_id: ExternalImageId, channel_index: u8);
}

/// Opaque pointer to the C++ external-image handler object, passed back into
/// the wr_renderer_{lock,unlock}_external_image callbacks.
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub struct WrExternalImageHandler {
    external_image_obj: *mut c_void,
}

impl ExternalImageHandler for WrExternalImageHandler {
    /// Locks the external image via the C++ callback and converts the result
    /// into WebRender's `ExternalImage` (texture handle or raw byte slice).
    fn lock(&mut self, id: ExternalImageId, channel_index: u8) -> ExternalImage {
        let image = unsafe { wr_renderer_lock_external_image(self.external_image_obj, id, channel_index) };
        ExternalImage {
            uv: TexelRect::new(image.u0, image.v0, image.u1, image.v1),
            source: match image.image_type {
                WrExternalImageType::NativeTexture => ExternalImageSource::NativeTexture(image.handle),
                WrExternalImageType::RawData => {
                    // buff/size come from the C++ side; make_slice tolerates null.
                    ExternalImageSource::RawData(unsafe { make_slice(image.buff, image.size) })
                },
                WrExternalImageType::Invalid => ExternalImageSource::Invalid,
            },
        }
    }

    /// Releases the lock taken in `lock` via the matching C++ callback.
    fn unlock(&mut self, id: ExternalImageId, channel_index: u8) {
        unsafe {
            wr_renderer_unlock_external_image(self.external_image_obj, id, channel_index);
        }
    }
}

#[repr(C)]
#[derive(Clone, Copy)]
// Used for ComponentTransfer only
// Per-channel transfer function type plus a pointer/length pair of float
// parameters for each of R, G, B and A.
pub struct WrFilterData {
    funcR_type: ComponentTransferFuncType,
    R_values: *mut c_float,
    R_values_count: usize,
    funcG_type: ComponentTransferFuncType,
    G_values: *mut c_float,
    G_values_count: usize,
    funcB_type: ComponentTransferFuncType,
    B_values: *mut c_float,
    B_values_count: usize,
    funcA_type: ComponentTransferFuncType,
    A_values: *mut c_float,
    A_values_count: usize,
}

/// Which property an animation binding animates.
#[repr(u32)]
#[derive(Debug)]
pub enum WrAnimationType {
    Transform = 0,
    Opacity = 1,
    BackgroundColor = 2,
}

/// Binds an animated property (by type and animation id) to a spatial tree item.
#[repr(C)]
pub struct WrAnimationProperty {
    effect_type: WrAnimationType,
    id: u64,
    key: SpatialTreeItemKey,
}

/// An (animation id, value) pair; instantiated per property type below.
/// cbindgen:derive-eq=false
#[repr(C)]
#[derive(Debug)]
pub struct WrAnimationPropertyValue<T> {
    pub id: u64,
    pub value: T,
}

// Concrete animated-value payloads for the three supported property types.
pub type WrTransformProperty = WrAnimationPropertyValue<LayoutTransform>;
pub type WrOpacityProperty = WrAnimationPropertyValue<f32>;
pub type WrColorProperty = WrAnimationPropertyValue<ColorF>;

/// Opaque window identifier used to route callbacks to the right compositor
/// window on the C++ side.
/// cbindgen:field-names=[mHandle]
/// cbindgen:derive-lt=true
/// cbindgen:derive-lte=true
#[repr(C)]
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct WrWindowId(u64);

/// Parameters for a computed (scale-to-fit) transform on a spatial node.
#[repr(C)]
#[derive(Debug)]
pub struct WrComputedTransformData {
    pub scale_from: LayoutSize,
    pub vertical_flip: bool,
    pub rotation: WrRotation,
    pub key: SpatialTreeItemKey,
}

/// An explicit transform matrix tied to a spatial tree item.
#[repr(C)]
pub struct WrTransformInfo {
    pub transform: LayoutTransform,
    pub key: SpatialTreeItemKey,
}

/// Resolves a GL symbol by name through the C++ GL context.
/// Panics if `name` contains an interior NUL byte (never the case for GL
/// procedure names).
fn get_proc_address(glcontext_ptr: *mut c_void, name: &str) -> *const c_void {
    extern "C" {
        fn get_proc_address_from_glcontext(glcontext_ptr: *mut c_void, procname: *const c_char) -> *const c_void;
    }

    let procname = CString::new(name).unwrap();
    unsafe { get_proc_address_from_glcontext(glcontext_ptr, procname.as_ptr()) }
}

/// Telemetry probe ids shared with the C++ side for timing measurements.
#[repr(C)]
pub enum TelemetryProbe {
    SceneBuildTime = 0,
    SceneSwapTime = 1,
    FrameBuildTime = 2,
}

// Miscellaneous C++ helpers: thread identification, GL context queries,
// shader/resource configuration, and crash/critical-error reporting.
extern "C" {
    fn is_in_compositor_thread() -> bool;
    fn is_in_render_thread() -> bool;
    fn is_in_main_thread() -> bool;
    fn is_glcontext_gles(glcontext_ptr: *mut c_void) -> bool;
    fn is_glcontext_angle(glcontext_ptr: *mut c_void) -> bool;
    fn gfx_wr_resource_path_override() -> *const c_char;
    fn gfx_wr_use_optimized_shaders() -> bool;
    // TODO: make gfx_critical_error() work.
    // We still have problem to pass the error message from render/render_backend
    // thread to main thread now.
    #[allow(dead_code)]
    fn gfx_critical_error(msg: *const c_char);
    fn gfx_critical_note(msg: *const c_char);
    fn gfx_wr_set_crash_annotation(annotation: CrashAnnotation, value: *const c_char);
    fn gfx_wr_clear_crash_annotation(annotation: CrashAnnotation);
}

/// Render notifier that forwards WebRender events to C++ through the
/// wr_notifier_* callbacks, keyed by window id.
struct CppNotifier {
    window_id: WrWindowId,
}

// The only state is a plain u64 window id; the wr_notifier_* callbacks are
// presumably safe to invoke from the render backend threads — the C++ side
// is responsible for that contract.
unsafe impl Send for CppNotifier {}

// C++ callbacks invoked by CppNotifier and the scene builder hooks below.
extern "C" {
    fn wr_notifier_wake_up(window_id: WrWindowId, composite_needed: bool);
    fn wr_notifier_new_frame_ready(window_id: WrWindowId, composite_needed: bool, publish_id: FramePublishId);
    fn wr_notifier_external_event(window_id: WrWindowId, raw_event: usize);
    fn wr_schedule_render(window_id: WrWindowId, reasons: RenderReasons);
    // NOTE: This moves away from pipeline_info.
    fn wr_finished_scene_build(window_id: WrWindowId, pipeline_info: &mut WrPipelineInfo);

    fn wr_transaction_notification_notified(handler: usize, when: Checkpoint);
}

impl RenderNotifier for CppNotifier {
    /// Clones the notifier; only the window id needs copying.
    fn clone(&self) -> Box<dyn RenderNotifier> {
        Box::new(CppNotifier {
            window_id: self.window_id,
        })
    }

    /// Forwards the wake-up signal to C++.
    fn wake_up(&self, composite_needed: bool) {
        unsafe {
            wr_notifier_wake_up(self.window_id, composite_needed);
        }
    }

    /// Notifies C++ that a new frame is ready; the document id and scrolled
    /// flag are not needed on the C++ side and are dropped here.
    fn new_frame_ready(&self, _: DocumentId, _scrolled: bool, composite_needed: bool, publish_id: FramePublishId) {
        unsafe {
            wr_notifier_new_frame_ready(self.window_id, composite_needed, publish_id);
        }
    }

    /// Forwards an external event's raw payload to C++.
    fn external_event(&self, event: ExternalEvent) {
        unsafe {
            wr_notifier_external_event(self.window_id, event.unwrap());
        }
    }
}

/// Stateless crash annotator that delegates to Gecko's crash reporter via FFI.
struct MozCrashAnnotator;

// Zero-sized and stateless; the gfx_wr_*_crash_annotation callbacks carry the
// thread-safety burden on the C++ side.
unsafe impl Send for MozCrashAnnotator {}

impl CrashAnnotator for MozCrashAnnotator {
    /// Records `value` under `annotation` in the crash reporter.
    fn set(&self, annotation: CrashAnnotation, value: &std::ffi::CStr) {
        unsafe {
            gfx_wr_set_crash_annotation(annotation, value.as_ptr());
        }
    }

    /// Removes any value recorded under `annotation`.
    fn clear(&self, annotation: CrashAnnotation) {
        unsafe {
            gfx_wr_clear_crash_annotation(annotation);
        }
    }

    /// Clones the (stateless) annotator.
    fn box_clone(&self) -> Box<dyn CrashAnnotator> {
        Box::new(MozCrashAnnotator)
    }
}

/// C entry point: sets the renderer's clear color.
#[no_mangle]
pub extern "C" fn wr_renderer_set_clear_color(renderer: &mut Renderer, color: ColorF) {
    renderer.set_clear_color(color);
}

/// C entry point: installs the external image handler (copied — the handler
/// struct is just an opaque pointer).
#[no_mangle]
pub extern "C" fn wr_renderer_set_external_image_handler(
    renderer: &mut Renderer,
    external_image_handler: &mut WrExternalImageHandler,
) {
    renderer.set_external_image_handler(Box::new(*external_image_handler));
}

/// C entry point: processes pending renderer updates.
#[no_mangle]
pub extern "C" fn wr_renderer_update(renderer: &mut Renderer) {
    renderer.update();
}

/// C entry point: sets the frame publish id the renderer should target.
#[no_mangle]
pub extern "C" fn wr_renderer_set_target_frame_publish_id(renderer: &mut Renderer, publish_id: FramePublishId) {
    renderer.set_target_frame_publish_id(publish_id);
}

/// C entry point: renders a frame of the given size.
/// On success fills `out_stats`/`out_dirty_rects` and returns true; on
/// failure reports each error as a gfx critical note and returns false.
#[no_mangle]
pub extern "C" fn wr_renderer_render(
    renderer: &mut Renderer,
    width: i32,
    height: i32,
    buffer_age: usize,
    out_stats: &mut RendererStats,
    out_dirty_rects: &mut ThinVec<DeviceIntRect>,
) -> bool {
    let device_size = DeviceIntSize::new(width, height);
    match renderer.render(device_size, buffer_age) {
        Err(errors) => {
            for e in errors {
                warn!(" Failed to render: {:?}", e);
                let msg = CString::new(format!("wr_renderer_render: {:?}", e)).unwrap();
                unsafe {
                    gfx_critical_note(msg.as_ptr());
                }
            }
            false
        },
        Ok(results) => {
            *out_stats = results.stats;
            out_dirty_rects.extend(results.dirty_rects);
            true
        },
    }
}

/// C entry point: forces the next frame to redraw everything.
#[no_mangle]
pub extern "C" fn wr_renderer_force_redraw(renderer: &mut Renderer) {
    renderer.force_redraw();
}

/// C entry point: records the current frame in `image_format`.
/// Fills the out-params and returns true on success, false otherwise.
#[no_mangle]
pub extern "C" fn wr_renderer_record_frame(
    renderer: &mut Renderer,
    image_format: ImageFormat,
    out_handle: &mut RecordedFrameHandle,
    out_width: &mut i32,
    out_height: &mut i32,
) -> bool {
    match renderer.record_frame(image_format) {
        Some((handle, size)) => {
            *out_handle = handle;
            *out_width = size.width;
            *out_height = size.height;
            true
        },
        None => false,
    }
}

/// C entry point: copies a previously recorded frame into `dst_buffer`
/// (pointer + length + row stride supplied by the caller).
#[no_mangle]
pub extern "C" fn wr_renderer_map_recorded_frame(
    renderer: &mut Renderer,
    handle: RecordedFrameHandle,
    dst_buffer: *mut u8,
    dst_buffer_len: usize,
    dst_stride: usize,
) -> bool {
    renderer.map_recorded_frame(
        handle,
        unsafe { make_slice_mut(dst_buffer, dst_buffer_len) },
        dst_stride,
    )
}

/// C entry point: frees the composition recorder's resources.
#[no_mangle]
pub extern "C" fn wr_renderer_release_composition_recorder_structures(renderer: &mut Renderer) {
    renderer.release_composition_recorder_structures();
}

/// C entry point: starts an async screenshot of the given window rect,
/// scaled into a buffer of at most `buffer_width` x `buffer_height`.
/// Writes the actual screenshot size through the out-pointers (asserted
/// non-null) and returns a handle for later mapping.
#[no_mangle]
pub extern "C" fn wr_renderer_get_screenshot_async(
    renderer: &mut Renderer,
    window_x: i32,
    window_y: i32,
    window_width: i32,
    window_height: i32,
    buffer_width: i32,
    buffer_height: i32,
    image_format: ImageFormat,
    screenshot_width: *mut i32,
    screenshot_height: *mut i32,
) -> AsyncScreenshotHandle {
    assert!(!screenshot_width.is_null());
    assert!(!screenshot_height.is_null());

    let (handle, size) = renderer.get_screenshot_async(
        DeviceIntRect::from_origin_and_size(
            DeviceIntPoint::new(window_x, window_y),
            DeviceIntSize::new(window_width, window_height),
        ),
        DeviceIntSize::new(buffer_width, buffer_height),
        image_format,
    );

    unsafe {
        *screenshot_width = size.width;
        *screenshot_height = size.height;
    }

    handle
}

/// C entry point: copies an async screenshot into `dst_buffer` and recycles
/// the handle's internal storage.
#[no_mangle]
pub extern "C" fn wr_renderer_map_and_recycle_screenshot(
    renderer: &mut Renderer,
    handle: AsyncScreenshotHandle,
    dst_buffer: *mut u8,
    dst_buffer_len: usize,
    dst_stride: usize,
) -> bool {
    renderer.map_and_recycle_screenshot(
        handle,
        unsafe { make_slice_mut(dst_buffer, dst_buffer_len) },
        dst_stride,
    )
}

/// C entry point: frees the async-screenshot/profiler bookkeeping structures.
#[no_mangle]
pub extern "C" fn wr_renderer_release_profiler_structures(renderer: &mut Renderer) {
    renderer.release_profiler_structures();
}

// Call wr_renderer_render() before calling this function.
/// C entry point: reads the rendered pixels back into `dst_buffer`.
/// Must run on the render thread (asserted).
///
/// Fix: the argument expression contained a stray HTML entity
/// (`&mut&nbsp;slice`) from a lossy copy, which is not valid Rust; restored
/// to `&mut slice`.
#[no_mangle]
pub unsafe extern "C" fn wr_renderer_readback(
    renderer: &mut Renderer,
    width: i32,
    height: i32,
    format: ImageFormat,
    dst_buffer: *mut u8,
    buffer_size: usize,
) {
    assert!(is_in_render_thread());

    let mut slice = make_slice_mut(dst_buffer, buffer_size);
    renderer.read_pixels_into(FramebufferIntSize::new(width, height).into(), format, &mut slice);
}

/// C entry point: sets the profiler overlay UI string.
/// Silently ignores input that is not valid UTF-8.
#[no_mangle]
pub unsafe extern "C" fn wr_renderer_set_profiler_ui(renderer: &mut Renderer, ui_str: *const u8, ui_str_len: usize) {
    let slice = std::slice::from_raw_parts(ui_str, ui_str_len);
    if let Ok(ui_str) = std::str::from_utf8(slice) {
        renderer.set_profiler_ui(ui_str);
    }
}

/// C entry point: takes back ownership of the boxed renderer, deinitializes
/// its GPU resources, and drops it.
#[no_mangle]
pub unsafe extern "C" fn wr_renderer_delete(renderer: *mut Renderer) {
    let renderer = Box::from_raw(renderer);
    renderer.deinit();
    // let renderer go out of scope and get dropped
}

/// C entry point: adds this renderer's memory usage into `report`.
/// `swgl` is the software-GL context pointer (may be null — forwarded as-is).
#[no_mangle]
pub unsafe extern "C" fn wr_renderer_accumulate_memory_report(
    renderer: &mut Renderer,
    report: &mut MemoryReport,
    swgl: *mut c_void,
) {
    *report += renderer.report_memory(swgl);
}

// cbindgen doesn't support tuples, so we have a little struct instead, with
// an Into implementation to convert from the tuple to the struct.
/// (pipeline, document, epoch) triple describing one rendered pipeline.
#[repr(C)]
pub struct WrPipelineEpoch {
    pipeline_id: WrPipelineId,
    document_id: WrDocumentId,
    epoch: WrEpoch,
}

impl<'a> From<(&'a (WrPipelineId, WrDocumentId), &'a WrEpoch)> for WrPipelineEpoch {
    /// Flattens the borrowed ((pipeline, document), epoch) tuple into the
    /// FFI struct (all three ids are Copy).
    fn from((ids, epoch): (&(WrPipelineId, WrDocumentId), &WrEpoch)) -> WrPipelineEpoch {
        WrPipelineEpoch {
            pipeline_id: ids.0,
            document_id: ids.1,
            epoch: *epoch,
        }
    }
}

/// (pipeline, epoch) pair, the tuple-free FFI form used by cbindgen.
#[repr(C)]
pub struct WrPipelineIdAndEpoch {
    pipeline_id: WrPipelineId,
    epoch: WrEpoch,
}

impl<'a> From<(&WrPipelineId, &WrEpoch)> for WrPipelineIdAndEpoch {
    /// Copies the borrowed pair into the FFI struct.
    fn from((id, epoch): (&WrPipelineId, &WrEpoch)) -> WrPipelineIdAndEpoch {
        WrPipelineIdAndEpoch {
            pipeline_id: *id,
            epoch: *epoch,
        }
    }
}

/// Identifies a pipeline that was removed during the last transaction.
#[repr(C)]
pub struct WrRemovedPipeline {
    pipeline_id: WrPipelineId,
    document_id: WrDocumentId,
}

impl<'a> From<&'a (WrPipelineId, WrDocumentId)> for WrRemovedPipeline {
    /// Copies the borrowed (pipeline, document) pair into the FFI struct
    /// (both ids are Copy).
    fn from(ids: &(WrPipelineId, WrDocumentId)) -> WrRemovedPipeline {
        let &(pipeline_id, document_id) = ids;
        WrRemovedPipeline {
            pipeline_id,
            document_id,
        }
    }
}

/// FFI mirror of WebRender's `PipelineInfo`, built by `WrPipelineInfo::new`.
#[repr(C)]
pub struct WrPipelineInfo {
    /// This contains an entry for each pipeline that was rendered, along with
    /// the epoch at which it was rendered. Rendered pipelines include the root
    /// pipeline and any other pipelines that were reachable via IFrame display
    /// items from the root pipeline.
    epochs: ThinVec<WrPipelineEpoch>,
    /// This contains an entry for each pipeline that was removed during the
    /// last transaction. These pipelines would have been explicitly removed by
    /// calling remove_pipeline on the transaction object; the pipeline showing
    /// up in this array means that the data structures have been torn down on
    /// the webrender side, and so any remaining data structures on the caller
    /// side can now be torn down also.
    removed_pipelines: ThinVec<WrRemovedPipeline>,
}

impl WrPipelineInfo {
    /// Converts WebRender's `PipelineInfo` into the FFI representation by
    /// mapping each entry through the `From` impls above.
    fn new(info: &PipelineInfo) -> Self {
        let epochs = info.epochs.iter().map(WrPipelineEpoch::from).collect();
        let removed_pipelines = info.removed_pipelines.iter().map(WrRemovedPipeline::from).collect();
        WrPipelineInfo {
            epochs,
            removed_pipelines,
        }
    }
}

#[no_mangle]
pub unsafe extern "C" fn wr_renderer_flush_pipeline_info(renderer: &mut Renderer, out: &mut&nbsp;WrPipelineInfo) {
    let info = renderer.flush_pipeline_info();
    *out = WrPipelineInfo::new(&info);
}

// C++ query used by the profiler hooks below.
extern "C" {
    pub fn gecko_profiler_thread_is_being_profiled() -> bool;
}

/// Opens an interval marker named `name` in the Gecko profiler
/// (Graphics category, "Webrender" tracing group).
pub fn gecko_profiler_start_marker(name: &str) {
    use gecko_profiler::{gecko_profiler_category, MarkerOptions, MarkerTiming, ProfilerTime, Tracing};
    gecko_profiler::add_marker(
        name,
        gecko_profiler_category!(Graphics),
        MarkerOptions {
            timing: MarkerTiming::interval_start(ProfilerTime::now()),
            ..Default::default()
        },
        Tracing::from_str("Webrender"),
    );
}
/// Closes the interval marker previously opened by
/// `gecko_profiler_start_marker` with the same `name`.
pub fn gecko_profiler_end_marker(name: &str) {
    use gecko_profiler::{gecko_profiler_category, MarkerOptions, MarkerTiming, ProfilerTime, Tracing};
    gecko_profiler::add_marker(
        name,
        gecko_profiler_category!(Graphics),
        MarkerOptions {
            timing: MarkerTiming::interval_end(ProfilerTime::now()),
            ..Default::default()
        },
        Tracing::from_str("Webrender"),
    );
}

/// Emits an instantaneous (point-in-time) marker in the Gecko profiler.
pub fn gecko_profiler_event_marker(name: &str) {
    use gecko_profiler::{gecko_profiler_category, Tracing};
    gecko_profiler::add_marker(
        name,
        gecko_profiler_category!(Graphics),
        Default::default(),
        Tracing::from_str("Webrender"),
    );
}

/// Emits a text marker covering an interval that ended now and lasted
/// `microseconds`. Cheap no-op while the profiler isn't accepting markers.
pub fn gecko_profiler_add_text_marker(name: &str, text: &str, microseconds: f64) {
    use gecko_profiler::{gecko_profiler_category, MarkerOptions, MarkerTiming, ProfilerTime};
    if !gecko_profiler::can_accept_markers() {
        return;
    }

    // Back-date the start of the interval from the current time.
    let now = ProfilerTime::now();
    let start = now.clone().subtract_microseconds(microseconds);
    gecko_profiler::add_text_marker(
        name,
        gecko_profiler_category!(Graphics),
        MarkerOptions {
            timing: MarkerTiming::interval(start, now),
            ..Default::default()
        },
        text,
    );
}

/// Simple implementation of the WR ProfilerHooks trait to allow profile
/// markers to be seen in the Gecko profiler.
struct GeckoProfilerHooks;

impl ProfilerHooks for GeckoProfilerHooks {
    fn register_thread(&self, thread_name: &str) {
        gecko_profiler::register_thread(thread_name);
    }

    fn unregister_thread(&self) {
        gecko_profiler::unregister_thread();
    }

    fn begin_marker(&self, label: &str) {
        gecko_profiler_start_marker(label);
    }

    fn end_marker(&self, label: &str) {
        gecko_profiler_end_marker(label);
    }

    fn event_marker(&self, label: &str) {
        gecko_profiler_event_marker(label);
    }

    fn add_text_marker(&self, label: &str, text: &str, duration: Duration) {
        let micros = duration.as_micros() as f64;
        gecko_profiler_add_text_marker(label, text, micros);
    }

    fn thread_is_being_profiled(&self) -> bool {
        unsafe { gecko_profiler_thread_is_being_profiled() }
    }
}

static PROFILER_HOOKS: GeckoProfilerHooks = GeckoProfilerHooks {};

// C++-implemented callbacks for async panning/zooming (APZ) and
// off-main-thread animation (OMTA) sampling. All take the window id that
// identifies which window's state to operate on.
#[allow(improper_ctypes)] // this is needed so that rustc doesn't complain about passing the &mut Transaction to an extern function
extern "C" {
    // These callbacks are invoked from the scene builder thread (aka the APZ
    // updater thread)
    fn apz_register_updater(window_id: WrWindowId);
    fn apz_pre_scene_swap(window_id: WrWindowId);
    fn apz_post_scene_swap(window_id: WrWindowId, pipeline_info: &WrPipelineInfo);
    fn apz_run_updater(window_id: WrWindowId);
    fn apz_deregister_updater(window_id: WrWindowId);

    // These callbacks are invoked from the render backend thread (aka the APZ
    // sampler thread)
    fn apz_register_sampler(window_id: WrWindowId);
    // `generated_frame_id` may be null when no frame id is available.
    fn apz_sample_transforms(window_id: WrWindowId, generated_frame_id: *const u64, transaction: &mut Transaction);
    fn apz_deregister_sampler(window_id: WrWindowId);

    fn omta_register_sampler(window_id: WrWindowId);
    fn omta_sample(window_id: WrWindowId, transaction: &mut Transaction);
    fn omta_deregister_sampler(window_id: WrWindowId);
}

/// Scene-builder hook state: just the window these APZ callbacks belong to.
struct APZCallbacks {
    window_id: WrWindowId,
}

impl APZCallbacks {
    /// Creates hooks bound to the given window.
    pub fn new(window_id: WrWindowId) -> Self {
        Self { window_id }
    }
}

/// Bridges WebRender's scene-builder lifecycle events to the C++ APZ
/// updater for one window, and emits "SceneBuilding" profiler markers
/// around scene builds.
impl SceneBuilderHooks for APZCallbacks {
    // Called when the scene builder thread starts: optionally installs a
    // thread-local arena (pref-gated), then registers with the APZ updater.
    fn register(&self) {
        unsafe {
            if static_prefs::pref!("gfx.webrender.scene-builder-thread-local-arena") {
                wr_register_thread_local_arena();
            }
            apz_register_updater(self.window_id);
        }
    }

    fn pre_scene_build(&self) {
        gecko_profiler_start_marker("SceneBuilding");
    }

    fn pre_scene_swap(&self) {
        unsafe {
            apz_pre_scene_swap(self.window_id);
        }
    }

    // Notifies APZ of the swapped-in pipelines, then asks Gecko to schedule
    // a render so the new scene reaches the screen.
    fn post_scene_swap(&self, _document_ids: &Vec<DocumentId>, info: PipelineInfo) {
        let mut info = WrPipelineInfo::new(&info);
        unsafe {
            apz_post_scene_swap(self.window_id, &info);
        }

        // After a scene swap we should schedule a render for the next vsync,
        // otherwise there's no guarantee that the new scene will get rendered
        // anytime soon
        unsafe { wr_finished_scene_build(self.window_id, &mut info) }
        gecko_profiler_end_marker("SceneBuilding");
    }

    fn post_resource_update(&self, _document_ids: &Vec<DocumentId>) {
        unsafe { wr_schedule_render(self.window_id, RenderReasons::POST_RESOURCE_UPDATES_HOOK) }
        gecko_profiler_end_marker("SceneBuilding");
    }

    fn post_empty_scene_build(&self) {
        gecko_profiler_end_marker("SceneBuilding");
    }

    // Gives the APZ updater a chance to run queued tasks.
    fn poke(&self) {
        unsafe { apz_run_updater(self.window_id) }
    }

    fn deregister(&self) {
        unsafe { apz_deregister_updater(self.window_id) }
    }
}

/// Hooks run on the render backend thread.
struct RenderBackendCallbacks;

impl RenderBackendHooks for RenderBackendCallbacks {
    // Called when the render backend thread starts; optionally installs a
    // thread-local arena (pref-gated).
    fn init_thread(&self) {
        if static_prefs::pref!("gfx.webrender.frame-builder-thread-local-arena") {
            unsafe { wr_register_thread_local_arena() };
        }
    }
}

/// Async-property sampler state: the window whose APZ/OMTA samplers
/// these callbacks drive.
struct SamplerCallback {
    window_id: WrWindowId,
}

impl SamplerCallback {
    /// Creates a sampler callback bound to the given window.
    pub fn new(window_id: WrWindowId) -> Self {
        Self { window_id }
    }
}

/// Bridges WebRender's async property sampling to the C++ APZ and OMTA
/// samplers for one window.
impl AsyncPropertySampler for SamplerCallback {
    fn register(&self) {
        unsafe {
            apz_register_sampler(self.window_id);
            omta_register_sampler(self.window_id);
        }
    }

    /// Samples the current APZ transforms and OMTA animation values into a
    /// fresh transaction and returns the resulting frame messages.
    fn sample(&self, _document_id: DocumentId, generated_frame_id: Option<u64>) -> Vec<FrameMsg> {
        // Keep the id alive on the stack so the pointer handed across FFI
        // stays valid for the duration of the calls below.
        let generated_frame_id_value;
        let generated_frame_id: *const u64 = match generated_frame_id {
            Some(id) => {
                generated_frame_id_value = id;
                &generated_frame_id_value
            },
            // Fixed idiom: this is a *const pointer, so produce it with
            // ptr::null() rather than ptr::null_mut().
            None => ptr::null(),
        };
        let mut transaction = Transaction::new();
        // Reset the pending properties first because omta_sample and
        // apz_sample_transforms may fail to reset them due to null samplers.
        transaction.reset_dynamic_properties();
        unsafe {
            apz_sample_transforms(self.window_id, generated_frame_id, &mut transaction);
            omta_sample(self.window_id, &mut transaction);
        };
        transaction.get_frame_ops()
    }

    fn deregister(&self) {
        unsafe {
            apz_deregister_sampler(self.window_id);
            omta_deregister_sampler(self.window_id);
        }
    }
}

extern "C" {
    // Implemented in C++: installs a thread-local allocation arena for the
    // calling thread (presumably a jemalloc arena — confirm on the C++ side).
    fn wr_register_thread_local_arena();
}

/// FFI-opaque handle around a shared rayon worker pool.
pub struct WrThreadPool(Arc<rayon::ThreadPool>);

/// FFI: builds a rayon worker pool for WebRender and returns an owned,
/// heap-allocated handle (release with `wr_thread_pool_delete`).
///
/// Worker threads register with both the tracy and Gecko profilers on
/// start-up and unregister on exit; low-priority pools get an "LP" name tag.
#[no_mangle]
pub extern "C" fn wr_thread_pool_new(low_priority: bool) -> *mut WrThreadPool {
    // Clamp the number of workers between 1 and 4/8. We get diminishing returns
    // with high worker counts and extra overhead because of rayon and font
    // management. High-priority pools are capped at 4 because contention and
    // memory usage make it not worth going higher.
    let cap = if low_priority { 8 } else { 4 };
    let worker_count = num_cpus::get().min(cap);

    let tag = if low_priority { "LP" } else { "" };

    let use_thread_local_arena = static_prefs::pref!("gfx.webrender.worker-thread-local-arena");

    let pool = rayon::ThreadPoolBuilder::new()
        .thread_name(move |idx| format!("WRWorker{}#{}", tag, idx))
        .num_threads(worker_count)
        .start_handler(move |idx| {
            if use_thread_local_arena {
                unsafe {
                    wr_register_thread_local_arena();
                }
            }
            let name = format!("WRWorker{}#{}", tag, idx);
            register_thread_with_profiler(name.clone());
            gecko_profiler::register_thread(&name);
        })
        .exit_handler(|_idx| {
            gecko_profiler::unregister_thread();
        })
        .build()
        .unwrap();

    Box::into_raw(Box::new(WrThreadPool(Arc::new(pool))))
}

#[no_mangle]
pub unsafe extern "C" fn wr_thread_pool_delete(thread_pool: *mut WrThreadPool) {
    mem::drop(Box::from_raw(thread_pool));
}

/// FFI-opaque handle around a shared WebRender chunk pool.
pub struct WrChunkPool(Arc<ChunkPool>);

/// FFI: allocates a new chunk pool handle (release with `wr_chunk_pool_delete`).
#[no_mangle]
pub unsafe extern "C" fn wr_chunk_pool_new() -> *mut WrChunkPool {
    let pool = WrChunkPool(Arc::new(ChunkPool::new()));
    Box::into_raw(Box::new(pool))
}

/// FFI: destroys a handle created by `wr_chunk_pool_new`.
#[no_mangle]
pub unsafe extern "C" fn wr_chunk_pool_delete(pool: *mut WrChunkPool) {
    drop(Box::from_raw(pool));
}

/// FFI: releases all chunks currently held by the pool.
#[no_mangle]
pub unsafe extern "C" fn wr_chunk_pool_purge(pool: &WrChunkPool) {
    pool.0.purge_all_chunks();
}

/// FFI: creates a shader program cache rooted at `prof_path`, using the
/// given worker pool for background work. Returns an owned pointer;
/// release with `wr_program_cache_delete`.
#[no_mangle]
pub unsafe extern "C" fn wr_program_cache_new(
    prof_path: &nsAString,
    thread_pool: *mut WrThreadPool,
) -> *mut WrProgramCache {
    // Safety: `thread_pool` must be a valid pointer returned by
    // wr_thread_pool_new and not yet deleted.
    let workers = &(*thread_pool).0;
    let program_cache = WrProgramCache::new(prof_path, workers);
    Box::into_raw(Box::new(program_cache))
}

/// FFI: destroys a cache created by `wr_program_cache_new`.
#[no_mangle]
pub unsafe extern "C" fn wr_program_cache_delete(program_cache: *mut WrProgramCache) {
    mem::drop(Box::from_raw(program_cache));
}

/// FFI: kicks off loading of startup shader binaries from disk, if present.
#[no_mangle]
pub unsafe extern "C" fn wr_try_load_startup_shaders_from_disk(program_cache: *mut WrProgramCache) {
    (*program_cache).try_load_startup_shaders_from_disk();
}

/// FFI: deletes the on-disk shader program binary cache under `prof_path`.
/// Returns true on success; logs an error and returns false on failure.
#[no_mangle]
pub unsafe extern "C" fn remove_program_binary_disk_cache(prof_path: &nsAString) -> bool {
    if remove_disk_cache(prof_path).is_err() {
        error!("Failed to remove program binary disk cache");
        return false;
    }
    true
}

// This matches IsEnvSet in gfxEnv.h
/// Returns true when the environment variable `key` is set to a non-empty
/// (and valid-Unicode) value; false when unset, empty, or unreadable.
fn env_var_to_bool(key: &'static str) -> bool {
    match env::var(key) {
        Ok(value) => !value.is_empty(),
        Err(_) => false,
    }
}

// Call MakeCurrent before this.
/// Creates a WebRender `Device` for the given native GL context, wiring in
/// crash annotation, an optional shader program cache, and Gecko-provided
/// shader settings. Must run on the render thread with the context current.
fn wr_device_new(gl_context: *mut c_void, pc: Option<&mut WrProgramCache>) -> Device {
    assert!(unsafe { is_in_render_thread() });

    // Idiom fix: bind `gl` from the conditional expression instead of a
    // deferred `let gl;` assignment. Both arms yield the same function table
    // type, loaded via the context's proc-address lookup.
    let gl = if unsafe { is_glcontext_gles(gl_context) } {
        unsafe { gl::GlesFns::load_with(|symbol| get_proc_address(gl_context, symbol)) }
    } else {
        unsafe { gl::GlFns::load_with(|symbol| get_proc_address(gl_context, symbol)) }
    };

    let version = gl.get_string(gl::VERSION);

    info!("WebRender - OpenGL version new {}", version);

    // ANGLE contexts upload textures immediately; others go through a pixel
    // buffer with a one-time usage hint.
    let upload_method = if unsafe { is_glcontext_angle(gl_context) } {
        UploadMethod::Immediate
    } else {
        UploadMethod::PixelBuffer(ONE_TIME_USAGE_HINT)
    };

    // Optional shader resource override path supplied by Gecko; non-UTF-8
    // paths are silently treated as absent.
    let resource_override_path = unsafe {
        let override_charptr = gfx_wr_resource_path_override();
        if override_charptr.is_null() {
            None
        } else {
            match CStr::from_ptr(override_charptr).to_str() {
                Ok(override_str) => Some(PathBuf::from(override_str)),
                _ => None,
            }
        }
    };

    let use_optimized_shaders = unsafe { gfx_wr_use_optimized_shaders() };

    // Share the program cache's inner Rc with the device, if one was given.
    let cached_programs = pc.map(|cached_programs| Rc::clone(cached_programs.rc_get()));

    Device::new(
        gl,
        Some(Box::new(MozCrashAnnotator)),
        resource_override_path,
        use_optimized_shaders,
        upload_method,
        512 * 512, // upload buffer size — TODO confirm meaning against Device::new's signature
        cached_programs,
        true,
        true,
        None,
        false,
        false,
    )
}

// Native-compositor entry points implemented on the C++ side. Every function
// takes the opaque `compositor` pointer that was handed to WrCompositor /
// WrLayerCompositor when it was created.
extern "C" {
    // Surface lifecycle.
    fn wr_compositor_create_surface(
        compositor: *mut c_void,
        id: NativeSurfaceId,
        virtual_offset: DeviceIntPoint,
        tile_size: DeviceIntSize,
        is_opaque: bool,
    );
    fn wr_compositor_create_swapchain_surface(
        compositor: *mut c_void,
        id: NativeSurfaceId,
        size: DeviceIntSize,
        is_opaque: bool,
    );
    fn wr_compositor_resize_swapchain(compositor: *mut c_void, id: NativeSurfaceId, size: DeviceIntSize);
    fn wr_compositor_create_external_surface(compositor: *mut c_void, id: NativeSurfaceId, is_opaque: bool);
    fn wr_compositor_create_backdrop_surface(compositor: *mut c_void, id: NativeSurfaceId, color: ColorF);
    fn wr_compositor_destroy_surface(compositor: *mut c_void, id: NativeSurfaceId);
    // Tile lifecycle within a surface.
    fn wr_compositor_create_tile(compositor: *mut c_void, id: NativeSurfaceId, x: i32, y: i32);
    fn wr_compositor_destroy_tile(compositor: *mut c_void, id: NativeSurfaceId, x: i32, y: i32);
    fn wr_compositor_attach_external_image(
        compositor: *mut c_void,
        id: NativeSurfaceId,
        external_image: ExternalImageId,
    );
    // Binds a tile for GL rendering; `offset` and `fbo_id` are out-params
    // filled in by the C++ side.
    fn wr_compositor_bind(
        compositor: *mut c_void,
        id: NativeTileId,
        offset: &mut DeviceIntPoint,
        fbo_id: &mut u32,
        dirty_rect: DeviceIntRect,
        valid_rect: DeviceIntRect,
    );
    fn wr_compositor_unbind(compositor: *mut c_void);
    // Per-frame compositing sequence.
    fn wr_compositor_begin_frame(compositor: *mut c_void);
    fn wr_compositor_add_surface(
        compositor: *mut c_void,
        id: NativeSurfaceId,
        transform: &CompositorSurfaceTransform,
        clip_rect: DeviceIntRect,
        image_rendering: ImageRendering,
    );
    fn wr_compositor_start_compositing(
        compositor: *mut c_void,
        clear_color: ColorF,
        dirty_rects: *const DeviceIntRect,
        num_dirty_rects: usize,
        opaque_rects: *const DeviceIntRect,
        num_opaque_rects: usize,
    );
    fn wr_compositor_end_frame(compositor: *mut c_void);
    fn wr_compositor_enable_native_compositor(compositor: *mut c_void, enable: bool);
    fn wr_compositor_deinit(compositor: *mut c_void);
    // Queries: out-params filled in by the C++ side.
    fn wr_compositor_get_capabilities(compositor: *mut c_void, caps: *mut CompositorCapabilities);
    fn wr_compositor_get_window_visibility(compositor: *mut c_void, caps: *mut WindowVisibility);
    // Swapchain-based layer compositing.
    fn wr_compositor_bind_swapchain(compositor: *mut c_void, id: NativeSurfaceId);
    fn wr_compositor_present_swapchain(compositor: *mut c_void, id: NativeSurfaceId);
    // Software (SWGL) tile mapping; `data`/`stride` are out-params.
    fn wr_compositor_map_tile(
        compositor: *mut c_void,
        id: NativeTileId,
        dirty_rect: DeviceIntRect,
        valid_rect: DeviceIntRect,
        data: &mut *mut c_void,
        stride: &mut i32,
    );
    fn wr_compositor_unmap_tile(compositor: *mut c_void);

    fn wr_partial_present_compositor_set_buffer_damage_region(
        compositor: *mut c_void,
        rects: *const DeviceIntRect,
        n_rects: usize,
    );
}

/// FFI wrapper around an opaque C++ native-compositor object. Implements
/// WebRender's `Compositor` trait by forwarding each call across FFI with
/// the stored pointer as the first argument.
pub struct WrCompositor(*mut c_void);

impl Compositor for WrCompositor {
    fn create_surface(
        &mut self,
        _device: &mut Device,
        id: NativeSurfaceId,
        virtual_offset: DeviceIntPoint,
        tile_size: DeviceIntSize,
        is_opaque: bool,
    ) {
        unsafe {
            wr_compositor_create_surface(self.0, id, virtual_offset, tile_size, is_opaque);
        }
    }

    fn create_external_surface(&mut self, _device: &mut Device, id: NativeSurfaceId, is_opaque: bool) {
        unsafe {
            wr_compositor_create_external_surface(self.0, id, is_opaque);
        }
    }

    fn create_backdrop_surface(&mut self, _device: &mut Device, id: NativeSurfaceId, color: ColorF) {
        unsafe {
            wr_compositor_create_backdrop_surface(self.0, id, color);
        }
    }

    fn destroy_surface(&mut self, _device: &mut Device, id: NativeSurfaceId) {
        unsafe {
            wr_compositor_destroy_surface(self.0, id);
        }
    }

    // Tiles are addressed by (surface id, x, y); the tuple is flattened for FFI.
    fn create_tile(&mut self, _device: &mut Device, id: NativeTileId) {
        unsafe {
            wr_compositor_create_tile(self.0, id.surface_id, id.x, id.y);
        }
    }

    fn destroy_tile(&mut self, _device: &mut Device, id: NativeTileId) {
        unsafe {
            wr_compositor_destroy_tile(self.0, id.surface_id, id.x, id.y);
        }
    }

    fn attach_external_image(&mut self, _device: &mut Device, id: NativeSurfaceId, external_image: ExternalImageId) {
        unsafe {
            wr_compositor_attach_external_image(self.0, id, external_image);
        }
    }

    // Binds a tile for rendering. The C++ side fills in the surface origin
    // and framebuffer id through the out-params.
    fn bind(
        &mut self,
        _device: &mut Device,
        id: NativeTileId,
        dirty_rect: DeviceIntRect,
        valid_rect: DeviceIntRect,
    ) -> NativeSurfaceInfo {
        let mut surface_info = NativeSurfaceInfo {
            origin: DeviceIntPoint::zero(),
            fbo_id: 0,
        };

        unsafe {
            wr_compositor_bind(
                self.0,
                id,
                &mut surface_info.origin,
                &mut surface_info.fbo_id,
                dirty_rect,
                valid_rect,
            );
        }

        surface_info
    }

    fn unbind(&mut self, _device: &mut Device) {
        unsafe {
            wr_compositor_unbind(self.0);
        }
    }

    fn begin_frame(&mut self, _device: &mut Device) {
        unsafe {
            wr_compositor_begin_frame(self.0);
        }
    }

    fn add_surface(
        &mut self,
        _device: &mut Device,
        id: NativeSurfaceId,
        transform: CompositorSurfaceTransform,
        clip_rect: DeviceIntRect,
        image_rendering: ImageRendering,
    ) {
        unsafe {
            wr_compositor_add_surface(self.0, id, &transform, clip_rect, image_rendering);
        }
    }

    // Rect slices are passed to C++ as (pointer, length) pairs.
    fn start_compositing(
        &mut self,
        _device: &mut Device,
        clear_color: ColorF,
        dirty_rects: &[DeviceIntRect],
        opaque_rects: &[DeviceIntRect],
    ) {
        unsafe {
            wr_compositor_start_compositing(
                self.0,
                clear_color,
                dirty_rects.as_ptr(),
                dirty_rects.len(),
                opaque_rects.as_ptr(),
                opaque_rects.len(),
            );
        }
    }

    fn end_frame(&mut self, _device: &mut Device) {
        unsafe {
            wr_compositor_end_frame(self.0);
        }
    }

    fn enable_native_compositor(&mut self, _device: &mut Device, enable: bool) {
        unsafe {
            wr_compositor_enable_native_compositor(self.0, enable);
        }
    }

    fn deinit(&mut self, _device: &mut Device) {
        unsafe {
            wr_compositor_deinit(self.0);
        }
    }

    // Queries start from Default::default() and let C++ overwrite the value.
    fn get_capabilities(&self, _device: &mut Device) -> CompositorCapabilities {
        unsafe {
            let mut caps: CompositorCapabilities = Default::default();
            wr_compositor_get_capabilities(self.0, &mut caps);
            caps
        }
    }

    fn get_window_visibility(&self, _device: &mut Device) -> WindowVisibility {
        unsafe {
            let mut visibility: WindowVisibility = Default::default();
            wr_compositor_get_window_visibility(self.0, &mut visibility);
            visibility
        }
    }
}

/// A native layer owned by WrLayerCompositor: the OS surface id plus the
/// bookkeeping needed to pool and recycle it across frames.
struct NativeLayer {
    // Id of the underlying native surface.
    id: NativeSurfaceId,
    // Current surface size; only updated (via swapchain resize) for
    // Content-usage layers.
    size: DeviceIntSize,
    is_opaque: bool,
    // Frames elapsed since this layer was last part of the visual tree;
    // used to expire stale pooled layers.
    frames_since_used: usize,
    usage: CompositorSurfaceUsage,
}

/// Layer-compositor state: an opaque C++ compositor pointer, a monotonic
/// surface-id source, a pool of recyclable layers, and the layers forming
/// the frame currently being built.
pub struct WrLayerCompositor {
    compositor: *mut c_void,
    next_layer_id: u64,
    // Layers not used by the current frame, kept for reuse.
    surface_pool: Vec<NativeLayer>,
    // Layers composited into the frame being built.
    visual_tree: Vec<NativeLayer>,
}

impl WrLayerCompositor {
    fn new(compositor: *mut c_void) -> Self {
        WrLayerCompositor {
            compositor,
            next_layer_id: 0,
            surface_pool: Vec::new(),
            visual_tree: Vec::new(),
        }
    }
}

/// Drives the C++ layer compositor: builds a visual tree of native layers
/// each frame, recycling surfaces from a pool and expiring unused ones.
impl LayerCompositor for WrLayerCompositor {
    // Begin compositing a frame with the supplied input config
    fn begin_frame(
        &mut self,
        input: &CompositorInputConfig,
    ) {
        unsafe {
            wr_compositor_begin_frame(self.compositor);
        }

        // end_frame() must have moved last frame's layers back to the pool.
        assert!(self.visual_tree.is_empty());

        // Build this frame's visual tree, reusing pooled layers when possible.
        for request in input.layers {
            let size = request.clip_rect.size();

            // A pooled layer is reusable when opacity and usage kind match;
            // size mismatches are fixed up below.
            let existing_index = self.surface_pool.iter().position(|layer| {
                layer.is_opaque == request.is_opaque &&
                layer.usage.matches(&request.usage)
            });

            let mut layer = match existing_index {
                Some(existing_index) => {
                    let mut layer = self.surface_pool.swap_remove(existing_index);

                    layer.frames_since_used = 0;

                    // Copy across (potentially) updated external image id
                    layer.usage = request.usage;

                    layer
                }
                None => {
                    // No reusable layer: allocate a fresh native surface with
                    // the next id (swapchain for content, external otherwise).
                    let id = NativeSurfaceId(self.next_layer_id);
                    self.next_layer_id += 1;

                    unsafe {
                        match request.usage {
                            CompositorSurfaceUsage::Content => {
                                wr_compositor_create_swapchain_surface(
                                    self.compositor,
                                    id,
                                    size,
                                    request.is_opaque,
                                );
                            }
                            CompositorSurfaceUsage::External { .. } => {
                                wr_compositor_create_external_surface(
                                    self.compositor,
                                    id,
                                    request.is_opaque,
                                );
                            }
                        }
                    }

                    NativeLayer {
                        id,
                        size,
                        is_opaque: request.is_opaque,
                        frames_since_used: 0,
                        usage: request.usage,
                    }
                }
            };

            match layer.usage {
                CompositorSurfaceUsage::Content => {
                    // Content swapchains are resized to match the requested
                    // clip size when it changed.
                    if layer.size.width != size.width || layer.size.height != size.height {
                        unsafe {
                            wr_compositor_resize_swapchain(
                                self.compositor,
                                layer.id,
                                size
                            );
                        }
                        layer.size = size;
                    }
                }
                CompositorSurfaceUsage::External { external_image_id, .. } => {
                    // (Re)attach the external image every frame; the id may
                    // have changed since the layer was pooled.
                    unsafe {
                        wr_compositor_attach_external_image(
                            self.compositor,
                            layer.id,
                            external_image_id,
                        );
                    }
                }
            }

            self.visual_tree.push(layer);
        }

        // Age layers that stayed in the pool this frame.
        for layer in &mut self.surface_pool {
            layer.frames_since_used += 1;
        }
    }

    // Bind a layer by index for compositing into
    fn bind_layer(&mut self, index: usize) {
        let layer = &self.visual_tree[index];

        unsafe {
            wr_compositor_bind_swapchain(
                self.compositor,
                layer.id,
            );
        }
    }

    // Finish compositing a layer and present the swapchain
    fn present_layer(&mut self, index: usize) {
        let layer = &self.visual_tree[index];

        unsafe {
            wr_compositor_present_swapchain(
                self.compositor,
                layer.id,
            );
        }
    }

    // Add a visual-tree layer (by index) to the composited output.
    fn add_surface(
        &mut self,
        index: usize,
        transform: CompositorSurfaceTransform,
        clip_rect: DeviceIntRect,
        image_rendering: ImageRendering,
    ) {
        let layer = &self.visual_tree[index];

        unsafe {
            wr_compositor_add_surface(
                self.compositor,
                layer.id,
                &transform,
                clip_rect,
                image_rendering,
            );
        }
    }

    // Finish compositing this frame
    fn end_frame(&mut self) {
        unsafe {
            wr_compositor_end_frame(self.compositor);
        }

        // Destroy any unused surface pool entries
        let mut layers_to_destroy = Vec::new();

        // Layers unused for 3 consecutive frames are evicted from the pool.
        self.surface_pool.retain(|layer| {
            let keep = layer.frames_since_used < 3;

            if !keep {
                layers_to_destroy.push(layer.id);
            }

            keep
        });

        for layer_id in layers_to_destroy {
            unsafe {
                wr_compositor_destroy_surface(self.compositor, layer_id);
            }
        }

        // Return this frame's layers to the pool for reuse next frame.
        self.surface_pool.append(&mut self.visual_tree);
    }
}

impl Drop for WrLayerCompositor {
    /// Releases every native surface still owned, whether pooled or part of
    /// the current visual tree.
    fn drop(&mut self) {
        for layer in &self.surface_pool {
            unsafe {
                wr_compositor_destroy_surface(self.compositor, layer.id);
            }
        }
        for layer in &self.visual_tree {
            unsafe {
                wr_compositor_destroy_surface(self.compositor, layer.id);
            }
        }
    }
}

// C++-implemented SWGL (software GL) surface locking. `ctx` is the SWGL
// context pointer; `composite_info` is an out-param filled on success.
extern "C" {
    fn wr_swgl_lock_composite_surface(
        ctx: *mut c_void,
        external_image_id: ExternalImageId,
        composite_info: *mut SWGLCompositeSurfaceInfo,
    ) -> bool;
    fn wr_swgl_unlock_composite_surface(ctx: *mut c_void, external_image_id: ExternalImageId);
}

impl MappableCompositor for WrCompositor {
    /// Map a tile's underlying buffer so it can be used as the backing for
    /// a SWGL framebuffer. This is intended to be a replacement for 'bind'
    /// in any compositors that intend to directly interoperate with SWGL
    /// while supporting some form of native layers.
    fn map_tile(
        &mut self,
        _device: &mut Device,
        id: NativeTileId,
        dirty_rect: DeviceIntRect,
        valid_rect: DeviceIntRect,
    ) -> Option<MappedTileInfo> {
        let mut tile_info = MappedTileInfo {
            data: ptr::null_mut(),
            stride: 0,
        };

        unsafe {
            wr_compositor_map_tile(
                self.0,
                id,
                dirty_rect,
                valid_rect,
                &mut tile_info.data,
                &mut tile_info.stride,
            );
        }

        // The mapping succeeded only if C++ filled in both out-params.
        if !tile_info.data.is_null() && tile_info.stride != 0 {
            Some(tile_info)
        } else {
            None
        }
    }

    /// Unmap a tile that was was previously mapped via map_tile to signal
    /// that SWGL is done rendering to the buffer.
    fn unmap_tile(&mut self, _device: &mut Device) {
        unsafe {
            wr_compositor_unmap_tile(self.0);
        }
    }

    /// Locks an external surface for SWGL compositing, filling
    /// `composite_info` on the C++ side; returns false on failure.
    fn lock_composite_surface(
        &mut self,
        _device: &mut Device,
        ctx: *mut c_void,
        external_image_id: ExternalImageId,
        composite_info: *mut SWGLCompositeSurfaceInfo,
    ) -> bool {
        unsafe { wr_swgl_lock_composite_surface(ctx, external_image_id, composite_info) }
    }
    /// Releases a surface previously locked by `lock_composite_surface`.
    fn unlock_composite_surface(&mut self, _device: &mut Device, ctx: *mut c_void, external_image_id: ExternalImageId) {
        unsafe { wr_swgl_unlock_composite_surface(ctx, external_image_id) }
    }
}

/// Thin FFI wrapper over an opaque C++ compositor pointer, used for
/// partial-present damage tracking.
pub struct WrPartialPresentCompositor(*mut c_void);

impl PartialPresentCompositor for WrPartialPresentCompositor {
    /// Forwards the buffer damage region to C++ as a (pointer, length) pair.
    fn set_buffer_damage_region(&mut self, rects: &[DeviceIntRect]) {
        let (rect_ptr, rect_count) = (rects.as_ptr(), rects.len());
        unsafe {
            wr_partial_present_compositor_set_buffer_damage_region(self.0, rect_ptr, rect_count);
        }
    }
}

/// A wrapper around a strong reference to a Shaders object.
pub struct WrShaders(SharedShaders);

/// FFI-opaque handle owning the glyph rasterization thread.
pub struct WrGlyphRasterThread(GlyphRasterThread);

#[no_mangle]
pub extern "C" fn wr_glyph_raster_thread_new() -> *mut WrGlyphRasterThread {
    let thread = GlyphRasterThread::new(
        || {
            gecko_profiler::register_thread("WrGlyphRasterizer");
        },
        || {
            gecko_profiler::unregister_thread();
        },
    );

    match thread {
        Ok(thread) => {
            return Box::into_raw(Box::new(WrGlyphRasterThread(thread)));
        },
        Err(..) => {
            return std::ptr::null_mut();
        },
    }
}

/// FFI: shuts down and deletes a thread created by
/// `wr_glyph_raster_thread_new`. `thread` must be a valid, non-null pointer
/// from that function and must not be used afterwards.
#[no_mangle]
pub extern "C" fn wr_glyph_raster_thread_delete(thread: *mut WrGlyphRasterThread) {
    let thread = unsafe { Box::from_raw(thread) };
    thread.0.shut_down();
}

// Call MakeCurrent before this.
/// Create a WebRender `Renderer` plus an initial `DocumentHandle` for one
/// window and hand both back to C++ through the `out_*` parameters.
///
/// Must run on the render thread (asserted below) with the GL context
/// already current. A non-null `swgl_context` selects software (SWGL)
/// rendering; otherwise a native GL context is required in `gl_context`.
///
/// On success returns true and writes:
/// * `out_handle` - heap-allocated `DocumentHandle` (caller frees with
///   `wr_api_delete`),
/// * `out_renderer` - heap-allocated `Renderer` owned by the caller,
/// * `out_max_texture_size` - the renderer's maximum texture dimension.
///
/// On failure returns false and writes an error message to `out_err`
/// (release with `wr_api_free_error_msg`).
#[no_mangle]
pub extern "C" fn wr_window_new(
    window_id: WrWindowId,
    window_width: i32,
    window_height: i32,
    is_main_window: bool,
    support_low_priority_transactions: bool,
    support_low_priority_threadpool: bool,
    allow_texture_swizzling: bool,
    allow_scissored_cache_clears: bool,
    swgl_context: *mut c_void,
    gl_context: *mut c_void,
    surface_origin_is_top_left: bool,
    program_cache: Option<&mut WrProgramCache>,
    shaders: Option<&mut WrShaders>,
    thread_pool: *mut WrThreadPool,
    thread_pool_low_priority: *mut WrThreadPool,
    chunk_pool: &WrChunkPool,
    glyph_raster_thread: Option<&WrGlyphRasterThread>,
    size_of_op: VoidPtrToSizeFn,
    enclosing_size_of_op: VoidPtrToSizeFn,
    document_id: u32,
    compositor: *mut c_void,
    use_native_compositor: bool,
    use_partial_present: bool,
    max_partial_present_rects: usize,
    draw_previous_partial_present_regions: bool,
    out_handle: &mut *mut DocumentHandle,
    out_renderer: &mut *mut Renderer,
    out_max_texture_size: *mut i32,
    out_err: &mut *mut c_char,
    enable_gpu_markers: bool,
    panic_on_gl_error: bool,
    picture_tile_width: i32,
    picture_tile_height: i32,
    reject_software_rasterizer: bool,
    low_quality_pinch_zoom: bool,
    max_shared_surface_size: i32,
    enable_subpixel_aa: bool,
    use_layer_compositor: bool,
) -> bool {
    assert!(unsafe { is_in_render_thread() });

    // Ensure the WR profiler callbacks are hooked up to the Gecko profiler.
    set_profiler_hooks(Some(&PROFILER_HOOKS));

    // A non-null SWGL context means software rendering; otherwise load the
    // GL function pointers (GLES or desktop GL) from the native context.
    let software = !swgl_context.is_null();
    let (gl, sw_gl) = if software {
        let ctx = swgl::Context::from(swgl_context);
        ctx.make_current();
        (Rc::new(ctx) as Rc<dyn gl::Gl>, Some(ctx))
    } else {
        let gl = unsafe {
            if gl_context.is_null() {
                panic!("Native GL context required when not using SWGL!");
            } else if is_glcontext_gles(gl_context) {
                gl::GlesFns::load_with(|symbol| get_proc_address(gl_context, symbol))
            } else {
                gl::GlFns::load_with(|symbol| get_proc_address(gl_context, symbol))
            }
        };
        (gl, None)
    };

    let version = gl.get_string(gl::VERSION);

    info!("WebRender - OpenGL version new {}", version);

    // SAFETY: the C++ caller provides valid thread-pool pointers;
    // thread_pool_low_priority is only dereferenced when the low-priority
    // pool was requested.
    let workers = unsafe { Arc::clone(&(*thread_pool).0) };
    let workers_low_priority = unsafe {
        if support_low_priority_threadpool {
            Arc::clone(&(*thread_pool_low_priority).0)
        } else {
            Arc::clone(&(*thread_pool).0)
        }
    };

    // Use immediate texture uploads on ANGLE contexts, PBO uploads elsewhere.
    let upload_method = if !gl_context.is_null() && unsafe { is_glcontext_angle(gl_context) } {
        UploadMethod::Immediate
    } else {
        UploadMethod::PixelBuffer(ONE_TIME_USAGE_HINT)
    };

    // Optionally compile all shaders up front when the env var is set.
    let precache_flags = if env_var_to_bool("MOZ_WR_PRECACHE_SHADERS") {
        ShaderPrecacheFlags::FULL_COMPILE
    } else {
        ShaderPrecacheFlags::empty()
    };

    let cached_programs = program_cache.map(|program_cache| Rc::clone(&program_cache.rc_get()));

    let color = if cfg!(target_os = "android") {
        // The color is for avoiding black flash before receiving display list.
        ColorF::new(1.0, 1.0, 1.0, 1.0)
    } else {
        ColorF::new(0.0, 0.0, 0.0, 0.0)
    };

    // Select the compositor configuration: SWGL always wraps the native
    // compositor in a SwCompositor; hardware rendering picks the layer,
    // native, or draw (optionally partial-present) configuration.
    let compositor_config = if software {
        CompositorConfig::Native {
            compositor: Box::new(SwCompositor::new(
                sw_gl.unwrap(),
                    Box::new(WrCompositor(compositor)),
                    use_native_compositor,
                )),
            }
    } else if use_native_compositor {
        if use_layer_compositor {
            CompositorConfig::Layer {
                compositor: Box::new(WrLayerCompositor::new(compositor)),
            }
        } else {
            CompositorConfig::Native {
                compositor: Box::new(WrCompositor(compositor)),
            }
        }
    } else {
        CompositorConfig::Draw {
            max_partial_present_rects,
            draw_previous_partial_present_regions,
            partial_present: if use_partial_present {
                Some(Box::new(WrPartialPresentCompositor(compositor)))
            } else {
                None
            },
        }
    };

    // Non-positive dimensions mean "use the default picture tile size".
    let picture_tile_size = if picture_tile_width > 0 && picture_tile_height > 0 {
        Some(DeviceIntSize::new(picture_tile_width, picture_tile_height))
    } else {
        None
    };

    // Secondary windows get smaller (512px) texture-cache textures.
    let texture_cache_config = if is_main_window {
        TextureCacheConfig::DEFAULT
    } else {
        TextureCacheConfig {
            color8_linear_texture_size: 512,
            color8_nearest_texture_size: 512,
            color8_glyph_texture_size: 512,
            alpha8_texture_size: 512,
            alpha8_glyph_texture_size: 512,
            alpha16_texture_size: 512,
        }
    };

    let opts = WebRenderOptions {
        enable_aa: true,
        enable_subpixel_aa,
        support_low_priority_transactions,
        allow_texture_swizzling,
        blob_image_handler: Some(Box::new(Moz2dBlobImageHandler::new(
            workers.clone(),
            workers_low_priority,
        ))),
        crash_annotator: Some(Box::new(MozCrashAnnotator)),
        workers: Some(workers),
        chunk_pool: Some(chunk_pool.0.clone()),
        dedicated_glyph_raster_thread: glyph_raster_thread.map(|grt| grt.0.clone()),
        size_of_op: Some(size_of_op),
        enclosing_size_of_op: Some(enclosing_size_of_op),
        cached_programs,
        // Allow gfx code to override where shader resources are loaded from.
        resource_override_path: unsafe {
            let override_charptr = gfx_wr_resource_path_override();
            if override_charptr.is_null() {
                None
            } else {
                match CStr::from_ptr(override_charptr).to_str() {
                    Ok(override_str) => Some(PathBuf::from(override_str)),
                    _ => None,
                }
            }
        },
        use_optimized_shaders: unsafe { gfx_wr_use_optimized_shaders() },
        renderer_id: Some(window_id.0),
        upload_method,
        scene_builder_hooks: Some(Box::new(APZCallbacks::new(window_id))),
        render_backend_hooks: Some(Box::new(RenderBackendCallbacks)),
        sampler: Some(Box::new(SamplerCallback::new(window_id))),
        max_internal_texture_size: Some(8192), // We want to tile if larger than this
        clear_color: color,
        precache_flags,
        namespace_alloc_by_client: true,
        // Font namespace must be allocated by the client
        shared_font_namespace: Some(next_namespace_id()),
        // SWGL doesn't support the GL_ALWAYS depth comparison function used by
        // `clear_caches_with_quads`, but scissored clears work well.
        clear_caches_with_quads: !software && !allow_scissored_cache_clears,
        // SWGL supports KHR_blend_equation_advanced safely, but we haven't yet
        // tested other HW platforms to determine if it is safe to allow them.
        allow_advanced_blend_equation: software,
        surface_origin_is_top_left,
        compositor_config,
        enable_gpu_markers,
        panic_on_gl_error,
        picture_tile_size,
        texture_cache_config,
        reject_software_rasterizer,
        low_quality_pinch_zoom,
        max_shared_surface_size,
        ..Default::default()
    };

    let window_size = DeviceIntSize::new(window_width, window_height);
    let notifier = Box::new(CppNotifier { window_id });
    let (renderer, sender) = match create_webrender_instance(gl, notifier, opts, shaders.map(|sh| &sh.0)) {
        Ok((renderer, sender)) => (renderer, sender),
        Err(e) => {
            // Record the failure in gfx criticals and hand the message to the
            // caller, which now owns the allocation (see wr_api_free_error_msg).
            warn!(" Failed to create a Renderer: {:?}", e);
            let msg = CString::new(format!("wr_window_new: {:?}", e)).unwrap();
            unsafe {
                gfx_critical_note(msg.as_ptr());
            }
            *out_err = msg.into_raw();
            return false;
        },
    };

    unsafe {
        *out_max_texture_size = renderer.get_max_texture_size();
    }
    // Transfer ownership of the document handle and renderer to the caller.
    *out_handle = Box::into_raw(Box::new(DocumentHandle::new(
        sender.create_api_by_client(next_namespace_id()),
        None,
        window_size,
        document_id,
    )));
    *out_renderer = Box::into_raw(Box::new(renderer));

    true
}

/// Free an error message previously returned through `out_err` of
/// wr_window_new (allocated via CString::into_raw). Passing null is a no-op.
#[no_mangle]
pub unsafe extern "C" fn wr_api_free_error_msg(msg: *mut c_char) {
    if msg.is_null() {
        return;
    }
    // Reconstitute the CString so its buffer is deallocated on drop.
    drop(CString::from_raw(msg));
}

/// Delete the WebRender document owned by this handle. The handle itself is
/// not freed; use wr_api_delete for that.
#[no_mangle]
pub unsafe extern "C" fn wr_api_delete_document(dh: &mut DocumentHandle) {
    dh.api.delete_document(dh.document_id);
}

/// Create a second DocumentHandle for the same document, with its own
/// client-allocated API namespace but sharing the source handle's hit tester.
/// Must be called on the compositor thread (asserted). The new handle is
/// heap-allocated, returned via `out_handle`, and owned by the caller
/// (free with wr_api_delete).
#[no_mangle]
pub extern "C" fn wr_api_clone(dh: &mut DocumentHandle, out_handle: &mut *mut DocumentHandle) {
    assert!(unsafe { is_in_compositor_thread() });

    // Obtain the source handle's hit tester up front so the clone can share
    // it rather than issuing its own request.
    let hit_tester = dh.ensure_hit_tester().clone();

    let handle = DocumentHandle {
        // Each client-side API instance gets its own namespace id.
        api: dh.api.create_sender().create_api_by_client(next_namespace_id()),
        document_id: dh.document_id,
        hit_tester: Some(hit_tester),
        hit_tester_request: None,
    };
    *out_handle = Box::into_raw(Box::new(handle));
}

/// Free a DocumentHandle previously handed out by wr_window_new or
/// wr_api_clone by reclaiming the Box and dropping it.
#[no_mangle]
pub unsafe extern "C" fn wr_api_delete(dh: *mut DocumentHandle) {
    let _ = Box::from_raw(dh);
}

/// Ask the render backend thread to stop processing; delegates to the
/// RenderApi on this handle.
#[no_mangle]
pub unsafe extern "C" fn wr_api_stop_render_backend(dh: &mut DocumentHandle) {
    dh.api.stop_render_backend();
}

/// Shut the API down, passing `true` to the underlying shut_down call
/// (NOTE(review): presumably "wait for completion" — confirm against the
/// RenderApi::shut_down signature).
#[no_mangle]
pub unsafe extern "C" fn wr_api_shut_down(dh: &mut DocumentHandle) {
    dh.api.shut_down(true);
}

/// Forward a memory-pressure notification to WebRender via the RenderApi.
#[no_mangle]
pub unsafe extern "C" fn wr_api_notify_memory_pressure(dh: &mut DocumentHandle) {
    dh.api.notify_memory_pressure();
}

/// Replace WebRender's debug flags with the given set.
#[no_mangle]
pub extern "C" fn wr_api_set_debug_flags(dh: &mut DocumentHandle, flags: DebugFlags) {
    dh.api.set_debug_flags(flags);
}

/// Set a boolean runtime parameter on WebRender.
#[no_mangle]
pub extern "C" fn wr_api_set_bool(dh: &mut DocumentHandle, param_name: BoolParameter, val: bool) {
    dh.api.set_parameter(Parameter::Bool(param_name, val));
}

/// Set an integer runtime parameter on WebRender.
#[no_mangle]
pub extern "C" fn wr_api_set_int(dh: &mut DocumentHandle, param_name: IntParameter, val: i32) {
    dh.api.set_parameter(Parameter::Int(param_name, val));
}

/// Set a floating-point runtime parameter on WebRender.
#[no_mangle]
pub extern "C" fn wr_api_set_float(dh: &mut DocumentHandle, param_name: FloatParameter, val: f32) {
    dh.api.set_parameter(Parameter::Float(param_name, val));
}

#[no_mangle]
pub unsafe extern "C" fn wr_api_accumulate_memory_report(
    dh: &mut DocumentHandle,
--> --------------------

--> maximum size reached

--> --------------------

[ Dauer der Verarbeitung: 0.11 Sekunden  (vorverarbeitet)  ]

                                                                                                                                                                                                                                                                                                                                                                                                     


Neuigkeiten

     Aktuelles
     Motto des Tages

Software

     Produkte
     Quellcodebibliothek

Aktivitäten

     Artikel über Sicherheit
     Anleitung zur Aktivierung von SSL

Muße

     Gedichte
     Musik
     Bilder

Jenseits des Üblichen ....
    

Besucherstatistik

Besucherstatistik

Monitoring

Montastic status badge