Skip to content

Commit

Permalink
Switched the mask generator to work per-tile
Browse files Browse the repository at this point in the history
  • Loading branch information
kvark committed Nov 17, 2016
1 parent 934b78d commit 9fbaa8e
Show file tree
Hide file tree
Showing 2 changed files with 59 additions and 20 deletions.
2 changes: 2 additions & 0 deletions webrender/src/mask_cache.rs
Original file line number Diff line number Diff line change
Expand Up @@ -7,11 +7,13 @@ use gpu_store::{GpuStore, GpuStoreAddress};
use internal_types::DeviceRect;
use prim_store::{ClipData, GpuBlock32, PrimitiveClipSource, PrimitiveStore};
use prim_store::{CLIP_DATA_GPU_SIZE, MASK_DATA_GPU_SIZE};
use std::i32;
use tiling::StackingContextIndex;
use util::{rect_from_points_f, TransformedRect};
use webrender_traits::{AuxiliaryLists, ImageMask};

// Large finite coordinate used as a clamp for effectively-unbounded values.
// NOTE(review): the consuming code is outside this hunk — confirm usage
// against the rest of mask_cache.rs.
const MAX_COORD: f32 = 1.0e+16;
/// Sentinel task index meaning "no mask task": used both when a primitive
/// has no clip at all and when its mask fully covers the target region,
/// so the batch can treat coverage as opaque.
pub const OPAQUE_TASK_INDEX: i32 = i32::MAX;

#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
pub struct ClipAddressRange {
Expand Down
77 changes: 57 additions & 20 deletions webrender/src/tiling.rs
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ use internal_types::{DeviceRect, DevicePoint, DeviceSize, DeviceLength, device_p
use internal_types::{ANGLE_FLOAT_TO_FIXED, LowLevelFilterOp};
use internal_types::{BatchTextures, CacheTextureId, SourceTexture};
use layer::Layer;
use mask_cache::{MaskCacheKey, MaskCacheInfo};
use mask_cache::{OPAQUE_TASK_INDEX, MaskCacheKey, MaskCacheInfo};
use prim_store::{PrimitiveGeometry, RectanglePrimitive, PrimitiveContainer};
use prim_store::{BorderPrimitiveCpu, BorderPrimitiveGpu, BoxShadowPrimitiveGpu};
use prim_store::{ImagePrimitiveCpu, ImagePrimitiveGpu, ImagePrimitiveKind};
Expand Down Expand Up @@ -62,6 +62,7 @@ trait AlphaBatchHelpers {
batch: &mut PrimitiveBatch,
layer_index: StackingContextIndex,
task_index: i32,
tile_id: TileUniqueId,
render_tasks: &RenderTaskCollection,
pass_index: RenderPassIndex);
}
Expand Down Expand Up @@ -180,6 +181,7 @@ impl AlphaBatchHelpers for PrimitiveStore {
batch: &mut PrimitiveBatch,
layer_index: StackingContextIndex,
task_index: i32,
tile_id: TileUniqueId,
render_tasks: &RenderTaskCollection,
child_pass_index: RenderPassIndex) {
let metadata = self.get_metadata(prim_index);
Expand All @@ -188,11 +190,16 @@ impl AlphaBatchHelpers for PrimitiveStore {
let prim_address = metadata.gpu_prim_index;
let clip_task_index = match metadata.clip_cache_info {
Some(ref clip_info) => {
let cache_task_id = RenderTaskId::Dynamic(RenderTaskKey::CacheMask(clip_info.key));
let cache_task_index = render_tasks.get_task_index(&cache_task_id, child_pass_index);
cache_task_index.0 as i32
let cache_task_key = RenderTaskKey::CacheMask(clip_info.key, tile_id);
if render_tasks.has_dynamic_task(&cache_task_key, child_pass_index) {
let cache_task_id = RenderTaskId::Dynamic(cache_task_key);
let cache_task_index = render_tasks.get_task_index(&cache_task_id, child_pass_index);
cache_task_index.0 as i32
} else {
OPAQUE_TASK_INDEX
}
},
None => i32::MAX, //sentinel value for the dummy mask
None => OPAQUE_TASK_INDEX
};

match &mut batch.data {
Expand Down Expand Up @@ -350,12 +357,14 @@ struct RenderPassIndex(isize);
/// Newtype wrapper around a render task's index within a pass's task list.
#[derive(Debug, Copy, Clone)]
pub struct RenderTaskIndex(usize);

type TileUniqueId = usize;

#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
pub enum RenderTaskKey {
/// Draw this primitive to a cache target.
CachePrimitive(PrimitiveCacheKey),
/// Draw the tile alpha mask for a primitive.
CacheMask(MaskCacheKey),
CacheMask(MaskCacheKey, TileUniqueId),
/// Apply a vertical blur pass of given radius for this primitive.
VerticalBlur(i32, PrimitiveIndex),
/// Apply a horizontal blur pass of given radius for this primitive.
Expand Down Expand Up @@ -431,6 +440,11 @@ impl RenderTaskCollection {
}
}
}

/// Returns true if a dynamic task for `key` was registered in `pass_index`.
///
/// Lets callers distinguish a mask task that was actually scheduled from
/// one that was skipped (e.g. because the mask fully covers the tile), in
/// which case they fall back to `OPAQUE_TASK_INDEX`.
fn has_dynamic_task(&self, key: &RenderTaskKey, pass_index: RenderPassIndex) -> bool {
    // `RenderTaskKey` derives `Copy`, so dereferencing builds the lookup
    // tuple without the clone the old TODO complained about.
    self.dynamic_tasks.contains_key(&(*key, pass_index))
}
}

#[derive(Debug, Clone)]
Expand Down Expand Up @@ -466,6 +480,7 @@ impl Default for PrimitiveGeometry {
/// One render task's worth of alpha-blended items, queued for batching.
struct AlphaBatchTask {
    task_id: RenderTaskId,
    items: Vec<AlphaRenderItem>,
    // The screen tile this task renders; forwarded into per-primitive batch
    // building so the per-tile cache-mask task can be looked up.
    tile_id: TileUniqueId,
}

/// Encapsulates the logic of building batches for items that are blended.
Expand Down Expand Up @@ -578,6 +593,7 @@ impl AlphaBatcher {
batch,
sc_index,
task_index,
task.tile_id,
render_tasks,
child_pass_index);
}
Expand Down Expand Up @@ -644,6 +660,7 @@ impl ClipBatcher {
/// Context threaded through screen-tile compilation.
struct CompileTileContext<'a> {
    layer_store: &'a [StackingContext],
    prim_store: &'a PrimitiveStore,
    // Id of the tile currently being compiled; the caller updates this
    // per tile while iterating over the screen tiles.
    tile_id: TileUniqueId,
    // Atomic to allow future parallel tile compilation; tiles are
    // currently compiled on a single thread.
    render_task_id_counter: AtomicUsize,
}

Expand Down Expand Up @@ -708,6 +725,7 @@ impl RenderTarget {
self.alpha_batcher.add_task(AlphaBatchTask {
task_id: task.id,
items: info.items,
tile_id: info.tile_id,
});
}
RenderTaskKind::VerticalBlur(_, prim_index) => {
Expand Down Expand Up @@ -786,7 +804,7 @@ impl RenderTarget {
}
RenderTaskKind::CacheMask(ref task_info) => {
let key = match task.id {
RenderTaskId::Dynamic(RenderTaskKey::CacheMask(ref key)) => key,
RenderTaskId::Dynamic(RenderTaskKey::CacheMask(ref key, _)) => key,
_ => unreachable!()
};
let task_index = render_tasks.get_task_index(&task.id, pass_index).0 as i32;
Expand Down Expand Up @@ -907,6 +925,7 @@ enum AlphaRenderItem {
/// An alpha-batch render task: the items to draw into `actual_rect`.
pub struct AlphaRenderTask {
    actual_rect: DeviceRect,
    items: Vec<AlphaRenderItem>,
    // Id of the screen tile this task belongs to (copied from the compile
    // context when the task is created).
    tile_id: TileUniqueId,
}

#[derive(Debug, Clone)]
Expand All @@ -915,6 +934,15 @@ pub struct CacheMaskTask {
image: Option<ImageKey>,
}

/// Outcome of `RenderTask::new_mask`: how a clip mask relates to the
/// region (tile rect) being rendered.
enum MaskResult {
    /// The mask is completely outside the region; such primitives are
    /// expected to have been culled already.
    Outside,
    /// The mask completely covers the region, so no mask task is needed
    /// (the opaque sentinel index is used instead).
    Covering,
    /// The mask intersects the region and needs a cache-mask render task.
    Inside(RenderTask),
}

#[derive(Debug, Clone)]
pub enum RenderTaskKind {
Alpha(AlphaRenderTask),
Expand Down Expand Up @@ -946,6 +974,7 @@ impl RenderTask {
kind: RenderTaskKind::Alpha(AlphaRenderTask {
actual_rect: actual_rect,
items: Vec::new(),
tile_id: ctx.tile_id,
}),
}
}
Expand All @@ -961,14 +990,17 @@ impl RenderTask {
}
}

fn new_mask(actual_rect: DeviceRect, cache_info: &MaskCacheInfo) -> Option<RenderTask> {
//CLIP TODO: handle a case where the tile is completely inside the intersection
if !actual_rect.intersects(&cache_info.device_rect) {
return None
fn new_mask(actual_rect: DeviceRect,
cache_info: &MaskCacheInfo,
tile_id: TileUniqueId)
-> MaskResult {
let task_rect = match actual_rect.intersection(&cache_info.device_rect) {
None => return MaskResult::Outside,
Some(rect) if rect == actual_rect => return MaskResult::Covering,
Some(rect) => rect,
};
let task_rect = cache_info.device_rect;
Some(RenderTask {
id: RenderTaskId::Dynamic(RenderTaskKey::CacheMask(cache_info.key)),
MaskResult::Inside(RenderTask {
id: RenderTaskId::Dynamic(RenderTaskKey::CacheMask(cache_info.key, tile_id)),
children: Vec::new(),
location: RenderTaskLocation::Dynamic(None, task_rect.size),
kind: RenderTaskKind::CacheMask(CacheMaskTask {
Expand Down Expand Up @@ -1686,7 +1718,8 @@ impl ScreenTile {
let layer_rect = layer.xf_rect.as_ref().unwrap().bounding_rect;
let needed_rect = layer_rect.intersection(&self.rect)
.expect("bug if these don't overlap");
let prev_task = mem::replace(&mut current_task, RenderTask::new_alpha_batch(needed_rect, ctx));
let prev_task = mem::replace(&mut current_task,
RenderTask::new_alpha_batch(needed_rect, ctx));
alpha_task_stack.push(prev_task);
}
}
Expand Down Expand Up @@ -1743,9 +1776,11 @@ impl ScreenTile {

// Add a task to render the updated image mask
if let Some(ref clip_info) = prim_metadata.clip_cache_info {
let mask_task = RenderTask::new_mask(self.rect, clip_info)
.expect("Primitive be culled by `prim_affects_tile` already");
current_task.children.push(mask_task);
match RenderTask::new_mask(self.rect, clip_info, ctx.tile_id) {
MaskResult::Outside => panic!("Primitive be culled by `prim_affects_tile` already"),
MaskResult::Covering => (), //do nothing
MaskResult::Inside(task) => current_task.children.push(task),
}
}

// Add any dynamic render tasks needed to render this primitive
Expand Down Expand Up @@ -2521,9 +2556,10 @@ impl FrameBuilder {
let mut max_passes_needed = 0;

let mut render_tasks = {
let ctx = CompileTileContext {
let mut ctx = CompileTileContext {
layer_store: &self.layer_store,
prim_store: &self.prim_store,
tile_id: 0,

// This doesn't need to be atomic right now (all the screen tiles are
// compiled on a single thread). However, in the future each of the
Expand All @@ -2537,7 +2573,8 @@ impl FrameBuilder {
}

// Build list of passes, target allocs that each tile needs.
for screen_tile in screen_tiles {
for (tile_id, screen_tile) in screen_tiles.into_iter().enumerate() {
ctx.tile_id = tile_id;
let rect = screen_tile.rect;
match screen_tile.compile(&ctx) {
Some(compiled_screen_tile) => {
Expand Down

0 comments on commit 9fbaa8e

Please sign in to comment.