make self-update work on Linux

Connor Peet 2022-10-17 13:34:06 -07:00
parent 7c3740a7e7
commit 4e9bdbd44f
No known key found for this signature in database
GPG key ID: CF8FD2EA0DBC61BD
4 changed files with 71 additions and 17 deletions

View file

@@ -312,7 +312,7 @@ stages:
     parameters:
       VSCODE_PUBLISH: ${{ variables.VSCODE_PUBLISH }}
       VSCODE_QUALITY: ${{ variables.VSCODE_QUALITY }}
-      VSCODE_BUILD_TUNNEL_CLI: false # todo until 32 bit CLI is available
+      VSCODE_BUILD_TUNNEL_CLI: true
       VSCODE_RUN_UNIT_TESTS: ${{ eq(parameters.VSCODE_STEP_ON_IT, false) }}
       VSCODE_RUN_INTEGRATION_TESTS: ${{ eq(parameters.VSCODE_STEP_ON_IT, false) }}
       VSCODE_RUN_SMOKE_TESTS: ${{ eq(parameters.VSCODE_STEP_ON_IT, false) }}

View file

@@ -3,8 +3,6 @@
  * Licensed under the MIT License. See License.txt in the project root for license information.
  *--------------------------------------------------------------------------------------------*/
-use std::process::Stdio;
 use async_trait::async_trait;
 use tokio::sync::oneshot;
@@ -215,6 +213,10 @@ async fn serve_with_csa(
     csa: CodeServerArgs,
     shutdown_rx: Option<oneshot::Receiver<()>>,
 ) -> Result<i32, AnyError> {
+    // Intentionally read before starting the server. If the server updated and
+    // respawn is requested, the old binary will get renamed, and then
+    // current_exe will point to the wrong path.
+    let current_exe = std::env::current_exe().unwrap();
     let platform = spanf!(log, log.span("prereq"), PreReqChecker::new().verify())?;
     let auth = Auth::new(&paths, log.clone());
@@ -244,11 +246,8 @@ async fn serve_with_csa(
             // reuse current args, but specify no-forward since tunnels will
             // already be running in this process, and we cannot do a login
             let args = std::env::args().skip(1).collect::<Vec<String>>();
-            let exit = std::process::Command::new(std::env::current_exe().unwrap())
+            let exit = std::process::Command::new(current_exe)
                 .args(args)
-                .stdout(Stdio::inherit())
-                .stderr(Stdio::inherit())
-                .stdin(Stdio::inherit())
                 .spawn()
                 .map_err(|e| wrap(e, "error respawning after update"))?
                 .wait()
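
Two details in the hunk above are easy to miss: the executable path is captured before the server runs because a successful self-update renames the old binary, after which current_exe() would resolve to the wrong file; and a spawned child inherits stdin/stdout/stderr by default, which is why the explicit Stdio::inherit() calls (and the Stdio import) can be dropped. Below is a minimal standalone sketch of that respawn pattern, not the commit's code; UPDATED_AND_RESPAWN is a made-up environment flag standing in for the real "server updated and asked to restart" condition.

use std::path::PathBuf;
use std::process::Command;

// Relaunch the same binary with the same arguments and forward its exit code.
// stdio handles are inherited by default, so no explicit Stdio::inherit() is needed.
fn respawn(current_exe: PathBuf) -> std::io::Result<i32> {
    let args: Vec<String> = std::env::args().skip(1).collect();
    let status = Command::new(current_exe)
        .args(args)
        // Drop the hypothetical flag so the child does not immediately respawn again.
        .env_remove("UPDATED_AND_RESPAWN")
        .spawn()?
        .wait()?;
    Ok(status.code().unwrap_or(1))
}

fn main() -> std::io::Result<()> {
    // Capture the path up front: if an updater later renames the running binary,
    // a fresh current_exe() call would point at the renamed (old) file.
    let exe = std::env::current_exe()?;

    // Hypothetical stand-in for "the server updated and requested a respawn".
    if std::env::var_os("UPDATED_AND_RESPAWN").is_some() {
        std::process::exit(respawn(exe)?);
    }
    Ok(())
}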

View file

@@ -5,12 +5,54 @@
 use crate::util::errors::{wrap, WrappedError};
 use flate2::read::GzDecoder;
-use std::fs::File;
+use std::fs;
+use std::io::{Seek, SeekFrom};
 use std::path::{Path, PathBuf};
 use tar::Archive;
 
 use super::io::ReportCopyProgress;
 
+fn should_skip_first_segment(file: &fs::File) -> Result<bool, WrappedError> {
+    // unfortunately, we need to re-read the archive here since you cannot reuse
+    // `.entries()`. But this will generally only look at one or two files, so this
+    // should be acceptably speedy... If not, we could hardcode behavior for
+    // different types of archives.
+    let tar = GzDecoder::new(file);
+    let mut archive = Archive::new(tar);
+    let mut entries = archive
+        .entries()
+        .map_err(|e| wrap(e, "error opening archive"))?;
+
+    let first_name = {
+        let file = entries
+            .next()
+            .expect("expected not to have an empty archive")
+            .map_err(|e| wrap(e, "error reading entry file"))?;
+
+        let path = file.path().expect("expected to have path");
+        path.iter()
+            .next()
+            .expect("expected to have non-empty name")
+            .to_owned()
+    };
+
+    let mut had_multiple = false;
+    for file in entries {
+        if let Ok(file) = file {
+            had_multiple = true;
+            if let Ok(name) = file.path() {
+                if name.iter().next() != Some(&first_name) {
+                    return Ok(false);
+                }
+            }
+        }
+    }
+
+    Ok(had_multiple) // prefix removal is invalid if there's only a single file
+}
+
 pub fn decompress_tarball<T>(
     path: &Path,
     parent_path: &Path,
@@ -19,12 +61,15 @@ pub fn decompress_tarball<T>(
 where
     T: ReportCopyProgress,
 {
-    let tar_gz = File::open(path).map_err(|e| {
-        wrap(
-            Box::new(e),
-            format!("error opening file {}", path.display()),
-        )
-    })?;
+    let mut tar_gz = fs::File::open(path)
+        .map_err(|e| wrap(e, format!("error opening file {}", path.display())))?;
+    let skip_first = should_skip_first_segment(&tar_gz)?;
+
+    // reset since skip logic read the tar already:
+    tar_gz
+        .seek(SeekFrom::Start(0))
+        .map_err(|e| wrap(e, "error resetting seek position"))?;
+
     let tar = GzDecoder::new(tar_gz);
     let mut archive = Archive::new(tar);
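
The check added above hands the freshly opened file to should_skip_first_segment, which wraps it in a GzDecoder and walks the entries, so the underlying cursor is no longer at the start when it returns; the seek(SeekFrom::Start(0)) is what lets the same handle be decompressed again from the top. A minimal sketch of that probe-then-rewind pattern, using a placeholder file name rather than anything from the commit:

use std::fs::File;
use std::io::{Read, Seek, SeekFrom};

fn main() -> std::io::Result<()> {
    // "archive.tar.gz" is only an example path for the sketch.
    let mut f = File::open("archive.tar.gz")?;

    // First pass: peek at the stream to make a decision (here, just the gzip magic bytes).
    let mut magic = [0u8; 2];
    f.read_exact(&mut magic)?;
    let looks_gzipped = magic == [0x1f, 0x8b];

    // Rewind; without this the second pass would start wherever the probe stopped.
    f.seek(SeekFrom::Start(0))?;

    // Second pass: read the whole file from the beginning.
    let mut all = Vec::new();
    f.read_to_end(&mut all)?;
    println!("gzip magic: {looks_gzipped}, {} bytes total", all.len());
    Ok(())
}
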
@@ -37,7 +82,17 @@ where
             .path()
             .map_err(|e| wrap(e, "error reading entry path"))?;
-        let path = parent_path.join(entry_path.iter().skip(1).collect::<PathBuf>());
+        let path = parent_path.join(if skip_first {
+            entry_path.iter().skip(1).collect::<PathBuf>()
+        } else {
+            entry_path.into_owned()
+        });
+
+        if let Some(p) = path.parent() {
+            fs::create_dir_all(&p)
+                .map_err(|e| wrap(e, format!("could not create dir for {}", p.display())))?;
+        }
+
         entry
             .unpack(&path)
             .map_err(|e| wrap(e, format!("error unpacking {}", path.display())))?;
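
For the join above, a pure-path illustration (the example entry names are assumed, not taken from the real archives): with skip_first set, the shared top-level directory is dropped from every entry before joining onto the destination, and with it unset the entry path is used as-is, which is what keeps a single-file archive from having its only name stripped away.

use std::path::{Path, PathBuf};

// Mirror of the join logic above as a standalone helper (illustrative only).
fn target_path(parent: &Path, entry: &Path, skip_first: bool) -> PathBuf {
    parent.join(if skip_first {
        entry.iter().skip(1).collect::<PathBuf>()
    } else {
        entry.to_path_buf()
    })
}

fn main() {
    let parent = Path::new("/tmp/out");
    // Wrapped layout: "code-cli/bin/code" lands at /tmp/out/bin/code.
    assert_eq!(
        target_path(parent, Path::new("code-cli/bin/code"), true),
        PathBuf::from("/tmp/out/bin/code")
    );
    // Bare single file: must not be stripped, so skip_first is false.
    assert_eq!(
        target_path(parent, Path::new("code"), false),
        PathBuf::from("/tmp/out/code")
    );
}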

View file

@@ -41,7 +41,7 @@ fn should_skip_first_segment(archive: &mut ZipArchive<File>) -> bool {
         }
     }
 
-    true
+    archive.len() > 1 // prefix removal is invalid if there's only a single file
 }
 
 pub fn unzip_file<T>(path: &Path, parent_path: &Path, mut reporter: T) -> Result<(), WrappedError>
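
Both archive formats now apply the same rule, and the new return value spells out the edge case: strip the leading path segment only when every entry shares the same first segment and there is more than one entry, so an archive containing just a single file (say, a bare code binary) keeps its name instead of being flattened to nothing. A standalone restatement of that rule over plain entry names, illustrative rather than the code in either file:

use std::path::Path;

// Strip-the-prefix decision over entry names: common first segment + more than one entry.
fn should_strip_prefix<'a>(names: impl IntoIterator<Item = &'a str>) -> bool {
    let mut segments = names.into_iter().map(|n| Path::new(n).iter().next());
    let first = match segments.next() {
        Some(seg) => seg,
        None => return false, // empty archive: nothing to strip
    };
    let mut had_multiple = false;
    for seg in segments {
        had_multiple = true;
        if seg != first {
            return false; // entries do not share a top-level directory
        }
    }
    had_multiple // a single entry is never stripped
}

fn main() {
    assert!(should_strip_prefix(["code-cli/", "code-cli/code"]));
    assert!(!should_strip_prefix(["code"])); // lone file keeps its name
    assert!(!should_strip_prefix(["a/x", "b/y"])); // no shared prefix
}
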
@@ -59,7 +59,7 @@ where
     } else {
         0
     };
+    println!("len: {}", archive.len());
     for i in 0..archive.len() {
         reporter.report_progress(i as u64, archive.len() as u64);
         let mut file = archive