cdb_client/src/qrscan.rs
JMARyA b3a96ed3e3
init
rewrite of cdb_ui in dioxus rust. goal is to integrate into a single rust codebase
2025-05-25 20:03:42 +02:00

238 lines
7.4 KiB
Rust

use bardecoder::prepare::BlockedMean;
use dioxus::prelude::*;
use gloo_timers::future::TimeoutFuture;
use image::{ImageBuffer, Luma, Rgba};
use std::io::Cursor;
use wasm_bindgen::{JsCast, JsValue};
use web_sys::{
window, CanvasRenderingContext2d, HtmlCanvasElement, HtmlVideoElement, MediaStreamConstraints,
};
/// Full-screen QR-code scanner overlay.
///
/// While `show` is true, requests camera access via `getUserMedia`, streams
/// the camera into a hidden `<video id="cam">`, grabs frames through a canvas
/// and feeds each frame to the `bardecoder` QR decoder. On a successful
/// decode the payload is written into `result`, the camera is released and
/// the overlay hides itself. Clicking the status text dismisses the overlay.
///
/// * `result` - receives the decoded QR payload (cleared when the scan is
///   cancelled by `show` turning false externally).
/// * `show`   - controls overlay visibility; set back to `false` on exit.
#[component]
pub fn QRCodeScanPage(result: Signal<String>, show: Signal<bool>) -> Element {
// Human-readable scanner status shown in the overlay ("active", "processing", ...).
let state = use_signal(|| "active".to_string());
// NOTE(review): dioxus Signals are Copy; these clones are unnecessary aliases.
let mut state2 = state.clone();
// Data-URL of the most recently captured (grayscaled) frame, rendered in the
// <img> below as a live preview since the <video> itself is hidden.
let imurl = use_signal(|| String::new());
let mut imurl2 = imurl.clone();
// NOTE(review): calling use_effect inside a conditional breaks Dioxus' hook
// rules (hooks must run in the same order on every render) — confirm this
// behaves, or hoist the effect and branch on `show` inside it instead.
if *show.read() {
use_effect(move || {
let window = window().unwrap();
let nav = window.navigator();
let media_devices = nav.media_devices().unwrap();
let mut constraints = MediaStreamConstraints::new();
constraints.set_video(&JsValue::TRUE); // request camera access
// getUserMedia returns a Promise; resolving it triggers the browser's
// camera-permission prompt.
let promise = media_devices
.get_user_media_with_constraints(&constraints)
.unwrap();
let future = wasm_bindgen_futures::JsFuture::from(promise);
// NOTE(review): spawn_local returns (), so `res` is a dead binding.
let res = wasm_bindgen_futures::spawn_local(async move {
// Wait for the user to grant (or deny) camera access.
let stream = match future.await {
Ok(s) => s,
Err(e) => {
log::error!("Failed to get user media: {:?}", e);
return;
}
};
// Attach the stream to the hidden <video id="cam"> rendered below.
let video_element = window
.document()
.unwrap()
.get_element_by_id("cam")
.unwrap()
.dyn_into::<HtmlVideoElement>()
.unwrap();
let media_stream = stream.dyn_into::<web_sys::MediaStream>().unwrap();
video_element.set_src_object(Some(&media_stream));
// Poll frames until a QR code is decoded or the overlay is closed.
loop {
// Overlay dismissed externally: release the camera and reset state.
if !*show.read() {
video_element.set_src_object(None);
stop_camera_stream(&media_stream);
result.set(String::new());
show.set(false);
return;
}
// grab_frame returns None until the video has real dimensions.
if let Some(frame) = grab_frame(&video_element) {
let image = image_data_to_image_buffer(&frame).unwrap();
state2.set("processing".to_string());
// Push the frame into the <img> preview as a data URL.
if let Ok(durl) = image_buffer_to_data_url(&image) {
imurl2.set(durl);
}
let qr = scan_qr(image);
if let Ok(qr_res) = qr {
state2.set(format!("FOUND QR!!! {qr_res}"));
// NOTE(review): println! is a no-op on wasm32 targets; use log:: instead.
println!("FOUND QR!!! {qr_res}");
// Success: stop the camera, hand the payload to the caller, close.
video_element.set_src_object(None);
stop_camera_stream(&media_stream);
result.set(qr_res);
show.set(false);
return;
} else {
// No decode this frame; surface the decoder error in the status text.
let err = qr.unwrap_err();
state2.set(format!("got {err}"));
}
} else {
log::error!("Grabing frame failed");
}
println!("Processed frame!");
// ~16 ms ≈ one 60 Hz frame; also yields control back to the browser.
TimeoutFuture::new(16).await;
}
});
});
}
rsx! {
if *show.read() {
// Dimmed full-viewport overlay centered over the page.
div {
style: "position: fixed; top: 0; left: 0; width: 100vw; height: 100vh; \
z-index: 9999; background-color: rgba(0, 0, 0, 0.7); \
display: flex; flex-direction: column; align-items: center; justify-content: center;",
// Status line; clicking it cancels the scan.
div {
onclick: move |_| {
show.set(false);
},
{state} }
// Live preview of the last captured frame.
img {
src: imurl2
}
// Hidden capture target for the camera stream.
// NOTE(review): "display: hidden" is not valid CSS (should be
// "display: none") — the video may actually be visible; confirm.
video {
id: "cam",
style: "display: hidden",
autoplay: true,
width: "640",
height: "480"
}
}
}
}
}
/// Stop every track of the given media stream, releasing the camera
/// (this also turns off the browser's "camera in use" indicator).
fn stop_camera_stream(media_stream: &web_sys::MediaStream) {
    for entry in media_stream.get_tracks().iter() {
        entry
            .dyn_into::<web_sys::MediaStreamTrack>()
            .unwrap()
            .stop();
    }
}
fn image_buffer_to_data_url(
img: &ImageBuffer<Rgba<u8>, Vec<u8>>,
) -> Result<String, Box<dyn std::error::Error>> {
// Create an in-memory buffer
let mut buf = Vec::new();
let img = gray(img.clone());
// Encode the image buffer as PNG into the in-memory buffer
{
let mut cursor = Cursor::new(&mut buf);
img.write_to(&mut cursor, image::ImageOutputFormat::Png)?;
}
// Base64 encode the PNG bytes
let encoded = base64::encode(&buf);
// Format as data URL
let data_url = format!("data:image/png;base64,{}", encoded);
Ok(data_url)
}
/// Convert an RGBA image to 8-bit grayscale using the standard BT.601
/// luminance weights (0.299 R + 0.587 G + 0.114 B); alpha is ignored.
pub fn gray(image: ImageBuffer<Rgba<u8>, Vec<u8>>) -> ImageBuffer<Luma<u8>, Vec<u8>> {
    let (width, height) = image.dimensions();
    ImageBuffer::from_fn(width, height, |x, y| {
        let px = image.get_pixel(x, y).0;
        let (r, g, b) = (px[0] as f32, px[1] as f32, px[2] as f32);
        Luma([(0.299 * r + 0.587 * g + 0.114 * b) as u8])
    })
}
/// Attempt to decode a QR code from a captured camera frame.
///
/// Returns the payload of the first successfully decoded QR code.
///
/// # Errors
/// Returns the last decoder error when every candidate fails to decode, or
/// a generic message when the decoder finds no QR candidates at all.
pub fn scan_qr(image: ImageBuffer<Rgba<u8>, Vec<u8>>) -> Result<String, String> {
    let mut db = bardecoder::default_builder();
    // Tune the preparation stage: binarize with a blocked-mean threshold
    // (block size 7, 9) instead of the default.
    db.prepare(Box::new(BlockedMean::new(7, 9)));
    // Build the actual decoder
    let decoder = db.build();

    // The decoder may return several candidate results. The previous version
    // `return`ed after inspecting only the first candidate, so a successful
    // decode later in the list was discarded. Scan all candidates and prefer
    // the first success, remembering the last failure for diagnostics.
    let mut last_err = None;
    for result in decoder.decode(&image) {
        match result {
            Ok(payload) => return Ok(payload),
            Err(e) => last_err = Some(e.to_string()),
        }
    }
    Err(last_err.unwrap_or_else(|| "no QR code found in frame".to_string()))
}
/// Convert `web_sys::ImageData` to `image::ImageBuffer<Rgba<u8>, Vec<u8>>`.
///
/// Returns `None` if the raw byte length does not match
/// `width * height * 4` (the RGBA layout `ImageBuffer::from_vec` expects).
pub fn image_data_to_image_buffer(
    image_data: &web_sys::ImageData,
) -> Option<ImageBuffer<Rgba<u8>, Vec<u8>>> {
    let (width, height) = (image_data.width(), image_data.height());
    // `data()` yields the canvas' raw RGBA bytes; copy into an owned Vec
    // so the ImageBuffer can take ownership.
    let raw = image_data.data().to_vec();
    ImageBuffer::from_vec(width, height, raw)
}
/// Capture the current frame of `video_element` as `ImageData`.
///
/// Returns `None` while the video has no dimensions yet (the stream has not
/// produced a frame), or if any canvas operation fails.
fn grab_frame(video_element: &HtmlVideoElement) -> Option<web_sys::ImageData> {
    let (w, h) = (video_element.video_width(), video_element.video_height());
    // A 0x0 video means the camera stream isn't ready yet.
    if w == 0 || h == 0 {
        return None;
    }
    // Draw the frame onto an off-screen canvas so its pixels can be read.
    let document = web_sys::window()?.document()?;
    let canvas: HtmlCanvasElement = document
        .create_element("canvas")
        .ok()?
        .dyn_into()
        .ok()?;
    canvas.set_width(w);
    canvas.set_height(h);
    let ctx: CanvasRenderingContext2d = canvas
        .get_context("2d")
        .ok()??
        .dyn_into()
        .ok()?;
    // Paint the current video frame at the origin.
    ctx.draw_image_with_html_video_element(video_element, 0.0, 0.0)
        .ok()?;
    // Read the painted pixels back out as ImageData.
    ctx.get_image_data(0.0, 0.0, w as f64, h as f64).ok()
}