Skip to content

Commit

Permalink
feat(proguard): Introduce experimental chunk uploading feature
Browse files Browse the repository at this point in the history
Introduce an experimental chunk uploading feature for the `sentry-cli upload-proguard` command. The feature can be activated by setting the `SENTRY_EXPERIMENTAL_PROGUARD_CHUNK_UPLOAD` environment variable to `1`. The feature is only activated when users opt in via this environment variable.

The experimental chunk uploading feature is not backwards compatible. We attempt the upload regardless of whether the server supports receiving chunk-uploaded Proguard files. Server-side support will only be available once getsentry/sentry#81131 is released. The goal here was to create something that works, so some optimizations that we use for other chunk-uploaded file types (e.g. first checking whether any chunks are already present on the server and only uploading the missing ones) are not implemented for Proguard.

Ref #2196
  • Loading branch information
szokeasaurusrex committed Nov 21, 2024
1 parent 992118c commit fa4adb8
Show file tree
Hide file tree
Showing 3 changed files with 187 additions and 49 deletions.
140 changes: 91 additions & 49 deletions src/commands/upload_proguard.rs
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
use std::env;
use std::fs;
use std::io;
use std::path::Path;
use std::path::PathBuf;

use anyhow::{bail, Error, Result};
Expand All @@ -17,16 +19,23 @@ use crate::config::Config;
use crate::utils::android::dump_proguard_uuids_as_properties;
use crate::utils::args::ArgExt;
use crate::utils::fs::TempFile;
use crate::utils::proguard_upload;
use crate::utils::system::QuietExit;
use crate::utils::ui::{copy_with_progress, make_byte_progress_bar};

#[derive(Debug)]
struct MappingRef {
pub struct MappingRef {
pub path: PathBuf,
pub size: u64,
pub uuid: Uuid,
}

impl AsRef<Path> for MappingRef {
    /// Lets a `MappingRef` be passed anywhere a filesystem path is expected
    /// (e.g. `fs::read`) by borrowing its `path` field.
    fn as_ref(&self) -> &Path {
        self.path.as_path()
    }
}

pub fn make_command(command: Command) -> Command {
command
.about("Upload ProGuard mapping files to a project.")
Expand Down Expand Up @@ -188,62 +197,95 @@ pub fn execute(matches: &ArgMatches) -> Result<()> {
}
}

if mappings.is_empty() && matches.get_flag("require_one") {
println!();
eprintln!("{}", style("error: found no mapping files to upload").red());
return Err(QuietExit(1).into());
}
let api = Api::current();
let config = Config::current();

println!("{} compressing mappings", style(">").dim());
let tf = TempFile::create()?;
{
let mut zip = zip::ZipWriter::new(tf.open()?);
for mapping in &mappings {
let pb = make_byte_progress_bar(mapping.size);
zip.start_file(
format!("proguard/{}.txt", mapping.uuid),
zip::write::FileOptions::default(),
)?;
copy_with_progress(&pb, &mut fs::File::open(&mapping.path)?, &mut zip)?;
pb.finish_and_clear();
// Don't initialize these until we confirm the user did not pass the --no-upload flag,
// or if we are using chunked uploading. This is because auth token, org, and project
// are not needed for the no-upload case.
let authenticated_api;
let (org, project);

if env::var("SENTRY_EXPERIMENTAL_PROGUARD_CHUNK_UPLOAD") == Ok("1".into()) {
log::warn!(
"EXPERIMENTAL FEATURE: Uploading proguard mappings using chunked uploading. \
Some functionality may be unavailable when using chunked uploading. Please unset \
the SENTRY_EXPERIMENTAL_PROGUARD_CHUNK_UPLOAD variable if you encounter any \
problems."
);

authenticated_api = api.authenticated()?;
(org, project) = config.get_org_and_project(matches)?;

let chunk_upload_options = authenticated_api
.get_chunk_upload_options(&org)
.map_err(|e| anyhow::anyhow!(e))
.and_then(|options| {
options.ok_or_else(|| {
anyhow::anyhow!(
"server does not support chunked uploading. unset \
SENTRY_EXPERIMENTAL_PROGUARD_CHUNK_UPLOAD to continue."
)
})
})?;

proguard_upload::chunk_upload(&mappings, &chunk_upload_options, &org, &project)?;
} else {
if mappings.is_empty() && matches.get_flag("require_one") {
println!();
eprintln!("{}", style("error: found no mapping files to upload").red());
return Err(QuietExit(1).into());
}
}

// write UUIDs into the mapping file.
if let Some(p) = matches.get_one::<String>("write_properties") {
let uuids: Vec<_> = mappings.iter().map(|x| x.uuid).collect();
dump_proguard_uuids_as_properties(p, &uuids)?;
}
println!("{} compressing mappings", style(">").dim());
let tf = TempFile::create()?;
{
let mut zip = zip::ZipWriter::new(tf.open()?);
for mapping in &mappings {
let pb = make_byte_progress_bar(mapping.size);
zip.start_file(
format!("proguard/{}.txt", mapping.uuid),
zip::write::FileOptions::default(),
)?;
copy_with_progress(&pb, &mut fs::File::open(&mapping.path)?, &mut zip)?;
pb.finish_and_clear();
}
}

if matches.get_flag("no_upload") {
println!("{} skipping upload.", style(">").dim());
return Ok(());
}
// write UUIDs into the mapping file.
if let Some(p) = matches.get_one::<String>("write_properties") {
let uuids: Vec<_> = mappings.iter().map(|x| x.uuid).collect();
dump_proguard_uuids_as_properties(p, &uuids)?;
}

println!("{} uploading mappings", style(">").dim());
let config = Config::current();
let (org, project) = config.get_org_and_project(matches)?;
if matches.get_flag("no_upload") {
println!("{} skipping upload.", style(">").dim());
return Ok(());
}

info!(
"Issuing a command for Organization: {} Project: {}",
org, project
);
println!("{} uploading mappings", style(">").dim());
(org, project) = config.get_org_and_project(matches)?;

let api = Api::current();
let authenticated_api = api.authenticated()?;
info!(
"Issuing a command for Organization: {} Project: {}",
org, project
);

let rv = authenticated_api
.region_specific(&org)
.upload_dif_archive(&project, tf.path())?;
println!(
"{} Uploaded a total of {} new mapping files",
style(">").dim(),
style(rv.len()).yellow()
);
if !rv.is_empty() {
println!("Newly uploaded debug symbols:");
for df in rv {
println!(" {}", style(&df.id()).dim());
authenticated_api = api.authenticated()?;

let rv = authenticated_api
.region_specific(&org)
.upload_dif_archive(&project, tf.path())?;
println!(
"{} Uploaded a total of {} new mapping files",
style(">").dim(),
style(rv.len()).yellow()
);
if !rv.is_empty() {
println!("Newly uploaded debug symbols:");
for df in rv {
println!(" {}", style(&df.id()).dim());
}
}
}

Expand Down
1 change: 1 addition & 0 deletions src/utils/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@ pub mod http;
pub mod logging;
pub mod metrics;
pub mod progress;
pub mod proguard_upload;
pub mod releases;
pub mod retry;
pub mod sourcemaps;
Expand Down
95 changes: 95 additions & 0 deletions src/utils/proguard_upload.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,95 @@
//! This file contains code for enabling chunk uploads for Proguard mappings.
//!
//! This code is intended as a temporary solution to enable chunk uploads for
//! Proguard mappings, while we work on a more permanent solution, which will
//! work for all different types of debug files.

use std::fs;

use anyhow::Result;
use indicatif::ProgressStyle;
use sha1_smol::Digest;

use super::chunks;
use super::chunks::Chunk;
use super::fs::get_sha1_checksums;
use crate::api::{Api, ChunkUploadOptions, ChunkedDifRequest};
use crate::commands::upload_proguard::MappingRef;

/// A Proguard mapping file prepared for chunked upload: the file's raw bytes
/// together with the precomputed checksums the chunk-upload and assemble
/// endpoints need.
struct ChunkedMapping {
    /// Raw bytes of the mapping file, read from disk.
    raw_data: Vec<u8>,
    /// SHA-1 checksum of the entire file (computed via `get_sha1_checksums`).
    hash: Digest,
    /// SHA-1 checksum of each `chunk_size`-sized chunk, in file order.
    chunk_hashes: Vec<Digest>,
    /// Logical upload name, of the form `/proguard/<uuid>.txt`.
    file_name: String,
    /// Size in bytes of each chunk (the final chunk may be shorter).
    chunk_size: usize,
}

impl ChunkedMapping {
    /// Reads the mapping file behind `mapping` and prepares it for chunked
    /// upload: computes the whole-file SHA-1 checksum plus one checksum per
    /// `chunk_size`-byte chunk.
    ///
    /// # Errors
    /// Fails if the file cannot be read, or if `chunk_size` does not fit in
    /// a `usize` on this platform.
    fn try_from_mapping(mapping: &MappingRef, chunk_size: u64) -> Result<Self> {
        let file_name = format!("/proguard/{}.txt", mapping.uuid);
        let raw_data = fs::read(mapping)?;
        let (hash, chunk_hashes) = get_sha1_checksums(&raw_data, chunk_size)?;

        Ok(Self {
            raw_data,
            hash,
            chunk_hashes,
            file_name,
            chunk_size: chunk_size.try_into()?,
        })
    }

    /// Yields the file's data split into `chunk_size`-byte pieces, each
    /// paired with its precomputed checksum.
    fn chunks(&self) -> impl Iterator<Item = Chunk<'_>> {
        let pieces = self.raw_data.chunks(self.chunk_size);
        let digests = self.chunk_hashes.iter();
        pieces
            .zip(digests)
            .map(|(piece, &digest)| Chunk((digest, piece)))
    }
}

impl<'a> From<&'a ChunkedMapping> for ChunkedDifRequest<'a> {
fn from(value: &'a ChunkedMapping) -> Self {
ChunkedDifRequest {
name: &value.file_name,
debug_id: None,
chunks: &value.chunk_hashes,
}
}
}

fn to_assemble(chunked: &ChunkedMapping) -> (Digest, ChunkedDifRequest<'_>) {
(chunked.hash, chunked.into())
}

/// Uploads Proguard mapping files to Sentry via the chunked upload endpoint,
/// then asks the server to assemble the uploaded chunks into complete files.
///
/// Unlike other chunk-uploaded debug file types, this does not first query
/// the server for already-present chunks; every chunk is always uploaded.
///
/// # Errors
/// Returns an error if a mapping file cannot be read, a chunk upload fails,
/// or the assemble request is rejected.
pub fn chunk_upload(
    paths: &[MappingRef],
    chunk_upload_options: &ChunkUploadOptions,
    org: &str,
    project: &str,
) -> Result<()> {
    // Read and chunk every mapping up front, failing fast on the first
    // unreadable file.
    let chunked_mappings: Vec<ChunkedMapping> = paths
        .iter()
        .map(|path| ChunkedMapping::try_from_mapping(path, chunk_upload_options.chunk_size))
        .collect::<Result<_>>()?;

    // indicatif templates use single-brace placeholders; doubled braces in a
    // plain string literal would render as literal "{wide_bar}" text instead
    // of an actual progress bar.
    let progress_style = ProgressStyle::default_bar().template(
        "Uploading Proguard mappings...\
        \n{wide_bar} {bytes}/{total_bytes} ({eta})",
    );

    let chunks = chunked_mappings.iter().flat_map(|mapping| mapping.chunks());

    chunks::upload_chunks(
        &chunks.collect::<Vec<_>>(),
        chunk_upload_options,
        progress_style,
    )?;

    // With all chunks on the server, request assembly into complete files.
    let assemble_request = chunked_mappings.iter().map(to_assemble).collect();

    Api::current()
        .authenticated()?
        .assemble_difs(org, project, &assemble_request)?;

    Ok(())
}

0 comments on commit fa4adb8

Please sign in to comment.