feat: image resizing (#5446)
Add image resizing on the client side to reduce load on the API
This commit is contained in:
15
codex-rs/utils/cache/Cargo.toml
vendored
Normal file
15
codex-rs/utils/cache/Cargo.toml
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
[package]
|
||||
name = "codex-utils-cache"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
lru = { workspace = true }
|
||||
sha1 = { workspace = true }
|
||||
tokio = { workspace = true, features = ["sync", "rt"] }
|
||||
|
||||
[dev-dependencies]
|
||||
tokio = { workspace = true, features = ["macros", "rt", "rt-multi-thread"] }
|
||||
159
codex-rs/utils/cache/src/lib.rs
vendored
Normal file
159
codex-rs/utils/cache/src/lib.rs
vendored
Normal file
@@ -0,0 +1,159 @@
|
||||
use std::borrow::Borrow;
|
||||
use std::hash::Hash;
|
||||
use std::num::NonZeroUsize;
|
||||
|
||||
use lru::LruCache;
|
||||
use sha1::Digest;
|
||||
use sha1::Sha1;
|
||||
use tokio::sync::Mutex;
|
||||
use tokio::sync::MutexGuard;
|
||||
|
||||
/// A minimal LRU cache protected by a Tokio mutex.
///
/// All accessors take the lock synchronously (via `lock_blocking`), so the
/// cache can be used from both synchronous code and async tasks.
pub struct BlockingLruCache<K, V> {
    // Tokio mutex (not std) so the guard can also be obtained inside a
    // multi-thread runtime via `block_in_place` + `blocking_lock`.
    inner: Mutex<LruCache<K, V>>,
}
|
||||
|
||||
impl<K, V> BlockingLruCache<K, V>
|
||||
where
|
||||
K: Eq + Hash,
|
||||
{
|
||||
/// Creates a cache with the provided non-zero capacity.
|
||||
#[must_use]
|
||||
pub fn new(capacity: NonZeroUsize) -> Self {
|
||||
Self {
|
||||
inner: Mutex::new(LruCache::new(capacity)),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a clone of the cached value for `key`, or computes and inserts it.
|
||||
pub fn get_or_insert_with(&self, key: K, value: impl FnOnce() -> V) -> V
|
||||
where
|
||||
V: Clone,
|
||||
{
|
||||
let mut guard = lock_blocking(&self.inner);
|
||||
if let Some(v) = guard.get(&key) {
|
||||
return v.clone();
|
||||
}
|
||||
let v = value();
|
||||
// Insert and return a clone to keep ownership in the cache.
|
||||
guard.put(key, v.clone());
|
||||
v
|
||||
}
|
||||
|
||||
/// Like `get_or_insert_with`, but the value factory may fail.
|
||||
pub fn get_or_try_insert_with<E>(
|
||||
&self,
|
||||
key: K,
|
||||
value: impl FnOnce() -> Result<V, E>,
|
||||
) -> Result<V, E>
|
||||
where
|
||||
V: Clone,
|
||||
{
|
||||
let mut guard = lock_blocking(&self.inner);
|
||||
if let Some(v) = guard.get(&key) {
|
||||
return Ok(v.clone());
|
||||
}
|
||||
let v = value()?;
|
||||
guard.put(key, v.clone());
|
||||
Ok(v)
|
||||
}
|
||||
|
||||
/// Builds a cache if `capacity` is non-zero, returning `None` otherwise.
|
||||
#[must_use]
|
||||
pub fn try_with_capacity(capacity: usize) -> Option<Self> {
|
||||
NonZeroUsize::new(capacity).map(Self::new)
|
||||
}
|
||||
|
||||
/// Returns a clone of the cached value corresponding to `key`, if present.
|
||||
pub fn get<Q>(&self, key: &Q) -> Option<V>
|
||||
where
|
||||
K: Borrow<Q>,
|
||||
Q: Hash + Eq + ?Sized,
|
||||
V: Clone,
|
||||
{
|
||||
lock_blocking(&self.inner).get(key).cloned()
|
||||
}
|
||||
|
||||
/// Inserts `value` for `key`, returning the previous entry if it existed.
|
||||
pub fn insert(&self, key: K, value: V) -> Option<V> {
|
||||
lock_blocking(&self.inner).put(key, value)
|
||||
}
|
||||
|
||||
/// Removes the entry for `key` if it exists, returning it.
|
||||
pub fn remove<Q>(&self, key: &Q) -> Option<V>
|
||||
where
|
||||
K: Borrow<Q>,
|
||||
Q: Hash + Eq + ?Sized,
|
||||
{
|
||||
lock_blocking(&self.inner).pop(key)
|
||||
}
|
||||
|
||||
/// Clears all entries from the cache.
|
||||
pub fn clear(&self) {
|
||||
lock_blocking(&self.inner).clear();
|
||||
}
|
||||
|
||||
/// Executes `callback` with a mutable reference to the underlying cache.
|
||||
pub fn with_mut<R>(&self, callback: impl FnOnce(&mut LruCache<K, V>) -> R) -> R {
|
||||
let mut guard = lock_blocking(&self.inner);
|
||||
callback(&mut guard)
|
||||
}
|
||||
|
||||
/// Provides direct access to the cache guard for advanced use cases.
|
||||
pub fn blocking_lock(&self) -> MutexGuard<'_, LruCache<K, V>> {
|
||||
lock_blocking(&self.inner)
|
||||
}
|
||||
}
|
||||
|
||||
/// Acquires the mutex from synchronous code, whether or not a Tokio runtime
/// is active on the current thread.
///
/// NOTE(review): both `block_in_place` and `blocking_lock` panic when invoked
/// on a `current_thread` runtime — this helper appears to assume all in-runtime
/// callers run on a multi-thread flavor; confirm against call sites.
fn lock_blocking<K, V>(m: &Mutex<LruCache<K, V>>) -> MutexGuard<'_, LruCache<K, V>>
where
    K: Eq + Hash,
{
    match tokio::runtime::Handle::try_current() {
        // On a runtime worker thread: enter blocking mode first so the
        // synchronous lock acquisition does not stall other scheduled tasks.
        Ok(_) => tokio::task::block_in_place(|| m.blocking_lock()),
        // No runtime on this thread: a plain blocking acquisition is safe.
        Err(_) => m.blocking_lock(),
    }
}
|
||||
|
||||
/// Computes the SHA-1 digest of `bytes`.
|
||||
///
|
||||
/// Useful for content-based cache keys when you want to avoid staleness
|
||||
/// caused by path-only keys.
|
||||
#[must_use]
|
||||
pub fn sha1_digest(bytes: &[u8]) -> [u8; 20] {
|
||||
let mut hasher = Sha1::new();
|
||||
hasher.update(bytes);
|
||||
let result = hasher.finalize();
|
||||
let mut out = [0; 20];
|
||||
out.copy_from_slice(&result);
|
||||
out
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::BlockingLruCache;
    use std::num::NonZeroUsize;

    /// Builds the two-entry string-to-int cache used by every test.
    fn two_entry_cache() -> BlockingLruCache<&'static str, i32> {
        BlockingLruCache::new(NonZeroUsize::new(2).expect("capacity"))
    }

    #[tokio::test(flavor = "multi_thread")]
    async fn stores_and_retrieves_values() {
        let cache = two_entry_cache();

        // A fresh cache misses, then hits after an insert.
        assert_eq!(cache.get(&"first"), None);
        cache.insert("first", 1);
        assert_eq!(cache.get(&"first"), Some(1));
    }

    #[tokio::test(flavor = "multi_thread")]
    async fn evicts_least_recently_used() {
        let cache = two_entry_cache();
        cache.insert("a", 1);
        cache.insert("b", 2);
        // Touch "a" so that "b" becomes the least recently used entry.
        assert_eq!(cache.get(&"a"), Some(1));

        // Capacity is 2, so inserting "c" must push "b" out.
        cache.insert("c", 3);

        assert_eq!(cache.get(&"b"), None);
        assert_eq!(cache.get(&"a"), Some(1));
        assert_eq!(cache.get(&"c"), Some(3));
    }
}
|
||||
18
codex-rs/utils/image/Cargo.toml
Normal file
18
codex-rs/utils/image/Cargo.toml
Normal file
@@ -0,0 +1,18 @@
|
||||
[package]
|
||||
name = "codex-utils-image"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
base64 = { workspace = true }
|
||||
image = { workspace = true, features = ["jpeg", "png"] }
|
||||
codex-utils-cache = { workspace = true }
|
||||
thiserror = { workspace = true }
|
||||
tokio = { workspace = true, features = ["fs", "rt", "rt-multi-thread", "macros"] }
|
||||
|
||||
[dev-dependencies]
|
||||
image = { workspace = true, features = ["jpeg", "png"] }
|
||||
tempfile = { workspace = true }
|
||||
25
codex-rs/utils/image/src/error.rs
Normal file
25
codex-rs/utils/image/src/error.rs
Normal file
@@ -0,0 +1,25 @@
|
||||
use image::ImageFormat;
|
||||
use std::path::PathBuf;
|
||||
use thiserror::Error;
|
||||
|
||||
/// Errors produced while loading, decoding, or re-encoding an image.
#[derive(Debug, Error)]
pub enum ImageProcessingError {
    /// The file at `path` could not be read from disk.
    #[error("failed to read image at {path}: {source}")]
    Read {
        path: PathBuf,
        #[source]
        source: std::io::Error,
    },
    /// The bytes at `path` were read but could not be decoded as an image.
    #[error("failed to decode image at {path}: {source}")]
    Decode {
        path: PathBuf,
        #[source]
        source: image::ImageError,
    },
    /// The (possibly resized) image could not be serialized as `format`.
    #[error("failed to encode image as {format:?}: {source}")]
    Encode {
        format: ImageFormat,
        #[source]
        source: image::ImageError,
    },
}
|
||||
252
codex-rs/utils/image/src/lib.rs
Normal file
252
codex-rs/utils/image/src/lib.rs
Normal file
@@ -0,0 +1,252 @@
|
||||
use std::num::NonZeroUsize;
|
||||
use std::path::Path;
|
||||
use std::sync::LazyLock;
|
||||
|
||||
use crate::error::ImageProcessingError;
|
||||
use base64::Engine;
|
||||
use base64::engine::general_purpose::STANDARD as BASE64_STANDARD;
|
||||
use codex_utils_cache::BlockingLruCache;
|
||||
use codex_utils_cache::sha1_digest;
|
||||
use image::ColorType;
|
||||
use image::DynamicImage;
|
||||
use image::GenericImageView;
|
||||
use image::ImageEncoder;
|
||||
use image::ImageFormat;
|
||||
use image::codecs::jpeg::JpegEncoder;
|
||||
use image::codecs::png::PngEncoder;
|
||||
use image::imageops::FilterType;
|
||||
/// Maximum width used when resizing images before uploading.
|
||||
pub const MAX_WIDTH: u32 = 2048;
|
||||
/// Maximum height used when resizing images before uploading.
|
||||
pub const MAX_HEIGHT: u32 = 768;
|
||||
|
||||
pub mod error;
|
||||
|
||||
/// An image payload ready for upload, along with its MIME type and pixel size.
#[derive(Debug, Clone)]
pub struct EncodedImage {
    /// Raw encoded image bytes (PNG or JPEG).
    pub bytes: Vec<u8>,
    /// MIME type matching `bytes`, e.g. "image/png".
    pub mime: String,
    /// Width in pixels of the encoded image.
    pub width: u32,
    /// Height in pixels of the encoded image.
    pub height: u32,
}
|
||||
|
||||
impl EncodedImage {
|
||||
pub fn into_data_url(self) -> String {
|
||||
let encoded = BASE64_STANDARD.encode(&self.bytes);
|
||||
format!("data:{};base64,{}", self.mime, encoded)
|
||||
}
|
||||
}
|
||||
|
||||
// Process-wide memo of processed images, keyed by the SHA-1 of the file
// bytes, so re-reading an unchanged file skips decode/resize/encode work.
// Capacity 32; the unwrap_or is unreachable since 32 is non-zero.
static IMAGE_CACHE: LazyLock<BlockingLruCache<[u8; 20], EncodedImage>> =
    LazyLock::new(|| BlockingLruCache::new(NonZeroUsize::new(32).unwrap_or(NonZeroUsize::MIN)));
|
||||
|
||||
pub fn load_and_resize_to_fit(path: &Path) -> Result<EncodedImage, ImageProcessingError> {
|
||||
let path_buf = path.to_path_buf();
|
||||
|
||||
let file_bytes = read_file_bytes(path, &path_buf)?;
|
||||
|
||||
let key = sha1_digest(&file_bytes);
|
||||
|
||||
IMAGE_CACHE.get_or_try_insert_with(key, move || {
|
||||
let format = match image::guess_format(&file_bytes) {
|
||||
Ok(ImageFormat::Png) => Some(ImageFormat::Png),
|
||||
Ok(ImageFormat::Jpeg) => Some(ImageFormat::Jpeg),
|
||||
_ => None,
|
||||
};
|
||||
|
||||
let dynamic = image::load_from_memory(&file_bytes).map_err(|source| {
|
||||
ImageProcessingError::Decode {
|
||||
path: path_buf.clone(),
|
||||
source,
|
||||
}
|
||||
})?;
|
||||
|
||||
let (width, height) = dynamic.dimensions();
|
||||
|
||||
let encoded = if width <= MAX_WIDTH && height <= MAX_HEIGHT {
|
||||
if let Some(format) = format {
|
||||
let mime = format_to_mime(format);
|
||||
EncodedImage {
|
||||
bytes: file_bytes,
|
||||
mime,
|
||||
width,
|
||||
height,
|
||||
}
|
||||
} else {
|
||||
let (bytes, output_format) = encode_image(&dynamic, ImageFormat::Png)?;
|
||||
let mime = format_to_mime(output_format);
|
||||
EncodedImage {
|
||||
bytes,
|
||||
mime,
|
||||
width,
|
||||
height,
|
||||
}
|
||||
}
|
||||
} else {
|
||||
let resized = dynamic.resize(MAX_WIDTH, MAX_HEIGHT, FilterType::Triangle);
|
||||
let target_format = format.unwrap_or(ImageFormat::Png);
|
||||
let (bytes, output_format) = encode_image(&resized, target_format)?;
|
||||
let mime = format_to_mime(output_format);
|
||||
EncodedImage {
|
||||
bytes,
|
||||
mime,
|
||||
width: resized.width(),
|
||||
height: resized.height(),
|
||||
}
|
||||
};
|
||||
|
||||
Ok(encoded)
|
||||
})
|
||||
}
|
||||
|
||||
fn read_file_bytes(path: &Path, path_for_error: &Path) -> Result<Vec<u8>, ImageProcessingError> {
|
||||
match tokio::runtime::Handle::try_current() {
|
||||
// If we're inside a Tokio runtime, avoid block_on (it panics on worker threads).
|
||||
// Use block_in_place and do a standard blocking read safely.
|
||||
Ok(_) => tokio::task::block_in_place(|| std::fs::read(path)).map_err(|source| {
|
||||
ImageProcessingError::Read {
|
||||
path: path_for_error.to_path_buf(),
|
||||
source,
|
||||
}
|
||||
}),
|
||||
// Outside a runtime, just read synchronously.
|
||||
Err(_) => std::fs::read(path).map_err(|source| ImageProcessingError::Read {
|
||||
path: path_for_error.to_path_buf(),
|
||||
source,
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
fn encode_image(
|
||||
image: &DynamicImage,
|
||||
preferred_format: ImageFormat,
|
||||
) -> Result<(Vec<u8>, ImageFormat), ImageProcessingError> {
|
||||
let target_format = match preferred_format {
|
||||
ImageFormat::Jpeg => ImageFormat::Jpeg,
|
||||
_ => ImageFormat::Png,
|
||||
};
|
||||
|
||||
let mut buffer = Vec::new();
|
||||
|
||||
match target_format {
|
||||
ImageFormat::Png => {
|
||||
let rgba = image.to_rgba8();
|
||||
let encoder = PngEncoder::new(&mut buffer);
|
||||
encoder
|
||||
.write_image(
|
||||
rgba.as_raw(),
|
||||
image.width(),
|
||||
image.height(),
|
||||
ColorType::Rgba8.into(),
|
||||
)
|
||||
.map_err(|source| ImageProcessingError::Encode {
|
||||
format: target_format,
|
||||
source,
|
||||
})?;
|
||||
}
|
||||
ImageFormat::Jpeg => {
|
||||
let mut encoder = JpegEncoder::new_with_quality(&mut buffer, 85);
|
||||
encoder
|
||||
.encode_image(image)
|
||||
.map_err(|source| ImageProcessingError::Encode {
|
||||
format: target_format,
|
||||
source,
|
||||
})?;
|
||||
}
|
||||
_ => unreachable!("unsupported target_format should have been handled earlier"),
|
||||
}
|
||||
|
||||
Ok((buffer, target_format))
|
||||
}
|
||||
|
||||
fn format_to_mime(format: ImageFormat) -> String {
|
||||
match format {
|
||||
ImageFormat::Jpeg => "image/jpeg".to_string(),
|
||||
_ => "image/png".to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use image::GenericImageView;
    use image::ImageBuffer;
    use image::Rgba;
    use tempfile::NamedTempFile;

    #[tokio::test(flavor = "multi_thread")]
    async fn returns_original_image_when_within_bounds() {
        // 64x32 fits comfortably inside MAX_WIDTH x MAX_HEIGHT.
        let file = NamedTempFile::new().expect("temp file");
        ImageBuffer::from_pixel(64, 32, Rgba([10u8, 20, 30, 255]))
            .save_with_format(file.path(), ImageFormat::Png)
            .expect("write png to temp file");
        let written = std::fs::read(file.path()).expect("read written image");

        let result = load_and_resize_to_fit(file.path()).expect("process image");

        // In-bounds PNG input must pass through byte-for-byte.
        assert_eq!((result.width, result.height), (64, 32));
        assert_eq!(result.mime, "image/png");
        assert_eq!(result.bytes, written);
    }

    #[tokio::test(flavor = "multi_thread")]
    async fn downscales_large_image() {
        // 4096x2048 exceeds both maxima and must be shrunk.
        let file = NamedTempFile::new().expect("temp file");
        ImageBuffer::from_pixel(4096, 2048, Rgba([200u8, 10, 10, 255]))
            .save_with_format(file.path(), ImageFormat::Png)
            .expect("write png to temp file");

        let result = load_and_resize_to_fit(file.path()).expect("process image");

        assert!(result.width <= MAX_WIDTH);
        assert!(result.height <= MAX_HEIGHT);

        // The reported dimensions must match the actual encoded payload.
        let decoded =
            image::load_from_memory(&result.bytes).expect("read resized bytes back into image");
        assert_eq!(decoded.dimensions(), (result.width, result.height));
    }

    #[tokio::test(flavor = "multi_thread")]
    async fn fails_cleanly_for_invalid_images() {
        let file = NamedTempFile::new().expect("temp file");
        std::fs::write(file.path(), b"not an image").expect("write bytes");

        let err = load_and_resize_to_fit(file.path()).expect_err("invalid image should fail");
        // Garbage bytes must surface as a decode error, not a read error.
        if !matches!(err, ImageProcessingError::Decode { .. }) {
            panic!("unexpected error variant");
        }
    }

    #[tokio::test(flavor = "multi_thread")]
    async fn reprocesses_updated_file_contents() {
        // Start from a clean slate so entries from other runs cannot leak in.
        IMAGE_CACHE.clear();

        let file = NamedTempFile::new().expect("temp file");
        ImageBuffer::from_pixel(32, 16, Rgba([20u8, 120, 220, 255]))
            .save_with_format(file.path(), ImageFormat::Png)
            .expect("write initial image");

        let first = load_and_resize_to_fit(file.path()).expect("process first image");

        // Overwrite the same path; the content-hash cache key must change.
        ImageBuffer::from_pixel(96, 48, Rgba([50u8, 60, 70, 255]))
            .save_with_format(file.path(), ImageFormat::Png)
            .expect("write updated image");

        let second = load_and_resize_to_fit(file.path()).expect("process updated image");

        assert_eq!((first.width, first.height), (32, 16));
        assert_eq!((second.width, second.height), (96, 48));
        assert_ne!(second.bytes, first.bytes);
    }
}
|
||||
Reference in New Issue
Block a user