app: fix IVF AV1 YUV 4:2:0 videos to work on Android. It turns out the decoding thread had a race condition, so we now load the header synchronously to get the number of frames before continuing. This hardly affects performance at all.

This commit is contained in:
darkfi
2025-12-28 03:21:21 -03:00
parent efd375b443
commit d1b94095a8
8 changed files with 85 additions and 54 deletions

1
bin/app/Cargo.lock generated
View File

@@ -4722,7 +4722,6 @@ dependencies = [
[[package]]
name = "rav1d"
version = "1.1.0"
source = "git+https://github.com/leo030303/rav1d?branch=add-rust-api#3ef268229621b863fd88a20cea226944d764dcd0"
dependencies = [
"assert_matches",
"atomig",

View File

@@ -67,7 +67,8 @@ indoc = "2.0.7"
# AV1 video decoding - rav1d with Rust API from leo030303 fork
# Disable default features to avoid NASM dependency (asm)
rav1d = { git = "https://github.com/leo030303/rav1d", branch = "add-rust-api", features = ["bitdepth_8"] }
#rav1d = { git = "https://github.com/leo030303/rav1d", branch = "add-rust-api", features = ["bitdepth_8"] }
rav1d = { git = "https://github.com/narodnik/rav1d", branch = "add-rust-api", features = ["bitdepth_8"] }
# This makes a HUGE difference to decoding speed.
# Over 160s to like 2s.
@@ -121,6 +122,7 @@ label = "DarkFi"
res = "data/res"
icon = "@mipmap/ic_launcher"
assets = "assets"
build_targets = ["aarch64-linux-android", "x86_64-linux-android"]
[[package.metadata.android.permission]]
name = "android.permission.INTERNET"

View File

@@ -56,7 +56,7 @@ mod ui_consts {
use crate::android::{get_appdata_path, get_external_storage_path};
use std::path::PathBuf;
pub const VID_PATH: &str = "forest_720x1280/{frame}.qoi";
pub const VID_PATH: &str = "forest_720x1280.ivf";
pub const VID_ASPECT_RATIO: f32 = 9. / 16.;
pub use super::android_ui_consts::*;
@@ -81,7 +81,6 @@ mod ui_consts {
mod desktop_paths {
use std::path::PathBuf;
//pub const VID_PATH: &str = "assets/forest_1920x1080.ivf";
pub const VID_PATH: &str = "assets/forest_1920x1080.ivf";
pub const VID_ASPECT_RATIO: f32 = 16. / 9.;

View File

@@ -37,7 +37,7 @@ const LIGHTMODE: bool = false;
mod ui_consts {
//pub const CHATDB_PATH: &str = "/data/data/darkfi.app/chatdb/";
//pub const KING_PATH: &str = "king.png";
pub const VID_PATH: &str = "forest/forest_{frame}.png";
pub const VID_PATH: &str = "forest_720x1280.ivf";
}
#[cfg(not(target_os = "android"))]

View File

@@ -75,7 +75,6 @@ impl GfxSeqAnim {
}
pub fn tick(&mut self) -> Option<GfxDrawCall> {
//t!("tick");
if self.curr_frame().is_none() {
assert_eq!(self.current_idx, 0);
return None

View File

@@ -20,6 +20,7 @@ use darkfi_serial::{
async_trait, AsyncEncodable, AsyncWrite, Decodable, Encodable, FutAsyncWriteExt,
SerialDecodable, SerialEncodable, VarInt,
};
use indoc::indoc;
#[cfg(target_os = "android")]
use miniquad::native::egl;
use miniquad::{
@@ -581,14 +582,14 @@ struct RenderContext<'a> {
impl<'a> RenderContext<'a> {
fn draw(&mut self) {
if DEBUG_RENDER {
debug!(target: "gfx", "RenderContext::draw()");
d!("RenderContext::draw()");
}
if DEBUG_TRAX {
get_trax().lock().set_curr(0);
}
self.draw_call(&self.draw_calls[&0], 0, DEBUG_RENDER);
if DEBUG_RENDER {
debug!(target: "gfx", "RenderContext::draw() [DONE]");
d!("RenderContext::draw() [DONE]");
}
}
@@ -609,7 +610,7 @@ impl<'a> RenderContext<'a> {
}
if DEBUG_RENDER {
debug!(target: "gfx", "=> viewport {view_x} {view_y} {view_w} {view_h}");
d!("=> viewport {view_x} {view_y} {view_w} {view_h}");
}
self.ctx.apply_viewport(view_x, view_y, view_w, view_h);
self.ctx.apply_scissor_rect(view_x, view_y, view_w, view_h);
@@ -648,15 +649,17 @@ impl<'a> RenderContext<'a> {
self.view.w /= self.scale;
self.view.h /= self.scale;
if is_debug {
debug!(target: "gfx", "{ws}set_scale({scale})");
d!("{ws}set_scale({scale})");
}
}
GfxDrawInstruction::Move(off) => {
self.cursor += *off;
if is_debug {
debug!(target: "gfx",
d!(
"{ws}move({off:?}) cursor={:?}, scale={}, view={:?}",
self.cursor, self.scale, self.view
self.cursor,
self.scale,
self.view
);
}
self.apply_model();
@@ -664,9 +667,11 @@ impl<'a> RenderContext<'a> {
GfxDrawInstruction::SetPos(pos) => {
self.cursor = old_cursor + *pos;
if is_debug {
debug!(target: "gfx",
d!(
"{ws}set_pos({pos:?}) cursor={:?}, scale={}, view={:?}",
self.cursor, self.scale, self.view
self.cursor,
self.scale,
self.view
);
}
self.apply_model();
@@ -685,17 +690,14 @@ impl<'a> RenderContext<'a> {
// Cursor resets within the view
self.cursor = Point::zero();
if is_debug {
debug!(target: "gfx",
"{ws}apply_view({view:?}) scale={}, view={:?}",
self.scale, self.view
);
d!("{ws}apply_view({view:?}) scale={}, view={:?}", self.scale, self.view);
}
self.apply_view();
self.apply_model();
}
GfxDrawInstruction::Draw(mesh) => {
if is_debug {
debug!(target: "gfx", "{ws}draw({mesh:?})");
d!("{ws}draw({mesh:?})");
}
let images = match &mesh.textures {
Some(texs) => texs.iter().map(|(_, tex_id)| *tex_id).collect(),
@@ -721,7 +723,7 @@ impl<'a> RenderContext<'a> {
indent = 0;
}
is_debug = true;
debug!(target: "gfx", "Frame start");
d!("Frame start");
}
GfxDrawInstruction::SetPipeline(pipeline) => {
self.gfx_pipeline = *pipeline;
@@ -729,7 +731,7 @@ impl<'a> RenderContext<'a> {
assert!(pipeline_idx < self.loaded_pipelines.len());
self.ctx.apply_pipeline(&self.loaded_pipelines[pipeline_idx]);
if is_debug {
debug!(target: "gfx", "{ws}set_pipeline({pipeline:?})");
d!("{ws}set_pipeline({pipeline:?})");
}
}
}
@@ -744,7 +746,7 @@ impl<'a> RenderContext<'a> {
get_trax().lock().set_curr(*dc_key);
}
if is_debug {
debug!(target: "gfx", "{ws}drawcall {dc_key}");
d!("{ws}drawcall {dc_key}");
}
self.draw_call(dc, indent + 1, is_debug);
}
@@ -752,7 +754,7 @@ impl<'a> RenderContext<'a> {
self.scale = old_scale;
if is_debug {
debug!(target: "gfx", "{ws}Frame close: cursor={old_cursor:?}, view={old_view:?}");
d!("{ws}Frame close: cursor={old_cursor:?}, view={old_view:?}");
}
self.view = old_view;
@@ -1127,14 +1129,14 @@ impl Stage {
width: u16,
height: u16,
data: &Vec<u8>,
fmt: TextureFormat,
format: TextureFormat,
gfx_texture_id: TextureId,
) {
let texture = self.ctx.new_texture_from_data_and_format(
data,
TextureParams {
kind: TextureKind::Texture2D,
format: fmt,
format,
width: width as _,
height: height as _,
wrap: TextureWrap::Clamp,

View File

@@ -34,7 +34,7 @@ use crate::{
use super::{ivf::IvfStreamingDemuxer, Av1VideoData, YuvTextures};
macro_rules! d { ($($arg:tt)*) => { debug!(target: "ui:video", $($arg)*); } }
macro_rules! d { ($($arg:tt)*) => { debug!(target: "ui:video::decode", $($arg)*); } }
/// Spawn the decoder thread (Thread 2 of 2)
///
@@ -61,33 +61,33 @@ pub fn spawn_decoder_thread(
vid_data: Arc<SyncMutex<Option<Av1VideoData>>>,
render_api: RenderApi,
) -> std::thread::JoinHandle<()> {
spawn_thread("video-decoder", move || {
let mut settings = Rav1dSettings::new();
// 0 is auto detect
settings.set_n_threads(4);
// 0 is auto
settings.set_max_frame_delay(0);
let mut settings = Rav1dSettings::new();
// 0 is auto detect
settings.set_n_threads(4);
// 0 is auto
settings.set_max_frame_delay(0);
let mut decoder = Rav1dDecoder::with_settings(&settings).unwrap();
//let mut decoder = Rav1dDecoder::new().unwrap();
let mut decoder = Rav1dDecoder::with_settings(&settings).unwrap();
//let mut decoder = Rav1dDecoder::new().unwrap();
let data = Arc::new(SyncMutex::new(None));
let data2 = data.clone();
miniquad::fs::load_file(&path, {
move |res| match res {
Ok(chunk) => *data2.lock() = Some(chunk),
Err(err) => {
error!("Failed to load chunk: {err}");
}
let data = Arc::new(SyncMutex::new(None));
let data2 = data.clone();
miniquad::fs::load_file(&path, {
move |res| match res {
Ok(chunk) => *data2.lock() = Some(chunk),
Err(err) => {
error!("Failed to load chunk: {err}");
}
});
let data = std::mem::take(&mut *data.lock()).unwrap();
}
});
let data = std::mem::take(&mut *data.lock()).unwrap();
let mut demuxer = IvfStreamingDemuxer::from_first_chunk(data).unwrap();
let num_frames = demuxer.header.num_frames as usize;
let mut demuxer = IvfStreamingDemuxer::from_first_chunk(data).unwrap();
let num_frames = demuxer.header.num_frames as usize;
*vid_data.lock() = Some(Av1VideoData::new(num_frames, &render_api));
*vid_data.lock() = Some(Av1VideoData::new(num_frames, &render_api));
spawn_thread("video-decoder", move || {
let mut frame_idx = 0;
loop {
let Some(av1_frame) = demuxer.try_read_frame() else {
@@ -96,7 +96,20 @@ pub fn spawn_decoder_thread(
process(&mut frame_idx, &pic, &vid_data, &render_api);
}
d!("Finished decoding video: {path}");
assert_eq!(frame_idx, vid_data.lock().as_ref().unwrap().textures.len());
assert_eq!(frame_idx, num_frames);
{
let mut vd_guard = vid_data.lock();
let vd = vd_guard.as_mut().unwrap();
for (frame_idx, tex) in vd.textures.iter().enumerate() {
if tex.is_none() {
panic!(
"Frame idx {frame_idx} / {num_frames} is none for video: {path}"
);
}
}
}
return;
};
@@ -148,13 +161,13 @@ fn process(
let height = pic.height() as usize;
// Y plane is full resolution
let y_data = y_plane[..(y_stride * height)].to_vec();
let y_data = copy_plane(&y_plane, y_stride, width, height);
// U and V planes are half resolution (4:2:0 subsampling)
let uv_width = width / 2;
let uv_height = height / 2;
let u_data = u_plane[..(u_stride * uv_height)].to_vec();
let v_data = v_plane[..(v_stride * uv_height)].to_vec();
let u_data = copy_plane(&u_plane, u_stride, uv_width, uv_height);
let v_data = copy_plane(&v_plane, v_stride, uv_width, uv_height);
// Create 3 separate textures with Alpha format (1 byte per pixel)
let tex_y = render_api.new_texture(
@@ -183,13 +196,30 @@ fn process(
let yuv_texs = YuvTextures { y: tex_y, u: tex_u, v: tex_v };
{
let num_frames = {
// Store in vid_data
let mut vd_guard = vid_data.lock();
let vd = vd_guard.as_mut().unwrap();
vd.textures[*frame_idx] = Some(yuv_texs.clone());
let _ = vd.textures_pub.try_broadcast((*frame_idx, yuv_texs));
vd.textures.len()
};
if (*frame_idx % 10) == 0 {
let pct_loaded = 100. * *frame_idx as f32 / num_frames as f32;
d!("Loaded video frame {pct_loaded:.2}%%");
}
//d!("Loaded video frame {frame_idx}");
*frame_idx += 1;
}
/// Copy one picture plane into a tightly-packed buffer, row by row.
///
/// Decoders commonly pad each row out to `stride` bytes for alignment, so
/// when `stride > width` the plane contains padding bytes we must not keep.
/// Only the first `width` bytes of each of the `height` rows are copied.
///
/// Panics if `plane` is too short to supply `height` rows of `width` bytes
/// at the given `stride` (same behavior as direct slice indexing).
fn copy_plane(plane: &[u8], stride: usize, width: usize, height: usize) -> Vec<u8> {
    // Exact final size is known up front, so reserve it once.
    let mut pixels = Vec::with_capacity(width * height);
    let mut row_start = 0;
    for _ in 0..height {
        // Take only the visible pixels of this row, skipping stride padding.
        pixels.extend_from_slice(&plane[row_start..row_start + width]);
        row_start += stride;
    }
    pixels
}

View File

@@ -35,10 +35,10 @@ use crate::{
use super::{DrawUpdate, OnModify, UIObject};
mod decode;
mod ivf;
mod threads;
use threads::spawn_decoder_thread;
use decode::spawn_decoder_thread;
macro_rules! t { ($($arg:tt)*) => { trace!(target: "ui:video", $($arg)*); } }