Fix bug which broke pulling from a Kubernetes cluster

SeanOMik 2023-07-23 17:32:55 -04:00
parent 1600c89fd2
commit 23fa73ad20
Signed by: SeanOMik
GPG Key ID: 568F326C7EB33ACB
8 changed files with 168 additions and 13 deletions

.gitignore (vendored, 3 lines changed)

@@ -2,4 +2,5 @@
.env
.vscode
/registry
config.toml
config.toml
Dockerfile

Cargo.lock (generated, 64 lines changed)

@@ -322,7 +322,7 @@ dependencies = [
"num-integer",
"num-traits",
"serde",
"time",
"time 0.1.45",
"wasm-bindgen",
"winapi",
]
@@ -424,6 +424,16 @@ version = "2.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9cace84e55f07e7301bae1c519df89cdad8cc3cd868413d3fdbdeca9ff3db484"
[[package]]
name = "crossbeam-channel"
version = "0.5.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200"
dependencies = [
"cfg-if",
"crossbeam-utils",
]
[[package]]
name = "crossbeam-queue"
version = "0.3.8"
@@ -1332,6 +1342,7 @@ dependencies = [
"tower-http",
"tower-layer",
"tracing",
"tracing-appender",
"tracing-log",
"tracing-subscriber",
"uuid",
@@ -2094,6 +2105,33 @@ dependencies = [
"winapi",
]
[[package]]
name = "time"
version = "0.3.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "59e399c068f43a5d116fedaf73b203fa4f9c519f17e2b34f63221d3792f81446"
dependencies = [
"itoa",
"serde",
"time-core",
"time-macros",
]
[[package]]
name = "time-core"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb"
[[package]]
name = "time-macros"
version = "0.2.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "96ba15a897f3c86766b757e5ac7221554c6750054d74d5b28844fce5fb36a6c4"
dependencies = [
"time-core",
]
[[package]]
name = "tinyvec"
version = "1.6.0"
@@ -2263,6 +2301,17 @@ dependencies = [
"tracing-core",
]
[[package]]
name = "tracing-appender"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09d48f71a791638519505cefafe162606f706c25592e4bde4d97600c0195312e"
dependencies = [
"crossbeam-channel",
"time 0.3.23",
"tracing-subscriber",
]
[[package]]
name = "tracing-attributes"
version = "0.1.23"
@@ -2295,6 +2344,16 @@ dependencies = [
"tracing-core",
]
[[package]]
name = "tracing-serde"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bc6b213177105856957181934e4920de57730fc69bf42c37ee5bb664d406d9e1"
dependencies = [
"serde",
"tracing-core",
]
[[package]]
name = "tracing-subscriber"
version = "0.3.16"
@@ -2302,11 +2361,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a6176eae26dd70d0c919749377897b54a9276bd7061339665dd68777926b5a70"
dependencies = [
"nu-ansi-term",
"serde",
"serde_json",
"sharded-slab",
"smallvec",
"thread_local",
"tracing-core",
"tracing-log",
"tracing-serde",
]
[[package]]

Cargo.toml

@@ -7,7 +7,7 @@ edition = "2021"
[dependencies]
tracing = "0.1.37"
tracing-subscriber = { version = "0.3.16", features = [ "tracing-log" ] }
tracing-subscriber = { version = "0.3.16", features = [ "tracing-log", "json" ] }
tracing-log = "0.1.3"
uuid = { version = "1.3.1", features = [ "v4", "fast-rng" ] }
@@ -54,3 +54,4 @@ bcrypt = "0.14.0"
bitflags = "2.2.1"
ldap3 = "0.11.1"
lazy_static = "1.4.0"
tracing-appender = "0.2.2"
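
The "json" feature added to tracing-subscriber here is what pulls serde, serde_json, and tracing-serde into the Cargo.lock hunks above; it enables JSON-formatted output on a fmt layer (main.rs below keeps the .json() call commented out). A minimal sketch of what the feature allows, with an illustrative helper name:

    use tracing_subscriber::prelude::*;

    // Illustrative only: format log events as JSON on stdout.
    // Requires tracing-subscriber built with the "json" feature.
    fn init_json_logging() {
        tracing_subscriber::registry()
            .with(tracing_subscriber::fmt::layer().json())
            .init();
    }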

Dockerfile

@@ -19,10 +19,10 @@ WORKDIR /app/src
# Build dependencies only. Separate these for caches
RUN cargo install cargo-build-deps
RUN sh -c "cargo build-deps ${RELEASE_BUILD:+ --release}"
RUN sh -c "cargo build-deps --release"
# Build the release executable.
RUN sh -c "cargo build ${RELEASE_BUILD:+ --release}"
RUN sh -c "cargo build --release"
# Runner stage. I tried using distroless (gcr.io/distroless/static-debian11), but the image was only ~3MBs smaller than
# alpine. I chose to use alpine since it makes it easier to exec into the container to debug things.

Dockerfile.debug (new file, 50 lines)

@@ -0,0 +1,50 @@
FROM rust:alpine3.17 as builder
ARG RELEASE_BUILD=true
# update packages
RUN apk update
RUN apk add build-base openssl-dev ca-certificates
# create root application folder
WORKDIR /app
COPY ./ /app/src
# Install rust toolchains
RUN rustup toolchain install stable
RUN rustup default stable
WORKDIR /app/src
# Build dependencies only. Separate these for caches
RUN cargo install cargo-build-deps
RUN sh -c "cargo build-deps"
# Build the release executable.
RUN sh -c "cargo build"
# Runner stage. I tried using distroless (gcr.io/distroless/static-debian11), but the image was only ~3MBs smaller than
# alpine. I chose to use alpine since it makes it easier to exec into the container to debug things.
FROM alpine:3.17
ARG UNAME=orca-registry
ARG UID=1000
ARG GID=1000
# Add user and copy the executable from the build stage.
RUN adduser --disabled-password --gecos "" $UNAME -s -G $GID -u $UID
COPY --from=builder --chown=$UID:$GID /app/src/target/debug/orca-registry /app/orca-registry
# Chown everything
RUN mkdir /data && \
chown -R $UID:$GID /data && \
chown -R $UID:$GID /app
USER $UNAME
WORKDIR /app/
EXPOSE 3000
ENTRYPOINT [ "/app/orca-registry" ]

(manifest pull handler source file)

@@ -82,6 +82,8 @@ pub async fn pull_manifest_get(Path((name, reference)): Path<(String, String)>,
}
let manifest_content = manifest_content.unwrap();
debug!("Pulled manifest: {}", manifest_content);
Ok((
StatusCode::OK,
[

(layer upload handler source file)

@@ -2,7 +2,7 @@ use std::collections::HashMap;
use std::io::ErrorKind;
use std::sync::Arc;
use axum::http::{StatusCode, header, HeaderName};
use axum::http::{StatusCode, header, HeaderName, HeaderMap};
use axum::extract::{Path, BodyStream, State, Query};
use axum::response::{IntoResponse, Response};
@@ -30,7 +30,7 @@ pub async fn start_upload_post(Path((name, )): Path<(String, )>) -> Result<Respo
).into_response());
}
pub async fn chunked_upload_layer_patch(Path((name, layer_uuid)): Path<(String, String)>, state: State<Arc<AppState>>, mut body: BodyStream) -> Result<Response, AppError> {
pub async fn chunked_upload_layer_patch(Path((name, layer_uuid)): Path<(String, String)>, headers: HeaderMap, state: State<Arc<AppState>>, mut body: BodyStream) -> Result<Response, AppError> {
let storage = state.storage.lock().await;
let current_size = storage.digest_length(&layer_uuid).await?;
@@ -65,18 +65,30 @@ pub async fn chunked_upload_layer_patch(Path((name, layer_uuid)): Path<(String,
}
};
let (starting, ending) = if let Some(current_size) = current_size {
(current_size, current_size + written_size)
let ending = if let Some(current_size) = current_size {
current_size + written_size
} else {
(0, written_size)
written_size
};
if let Some(content_length) = headers.get(header::CONTENT_LENGTH) {
let content_length = content_length.to_str().map(|cl| cl.parse::<usize>());
if let Ok(Ok(content_length)) = content_length {
debug!("Client specified a content length of {}", content_length);
if content_length != written_size {
warn!("The content length that was received from the client did not match the amount written to disk!");
}
}
}
let full_uri = format!("{}/v2/{}/blobs/uploads/{}", state.config.url(), name, layer_uuid);
Ok((
StatusCode::ACCEPTED,
[
(header::LOCATION, full_uri),
(header::RANGE, format!("{}-{}", starting, ending)),
(header::RANGE, format!("0-{}", ending - 1)),
(header::CONTENT_LENGTH, "0".to_string()),
(HeaderName::from_static("docker-upload-uuid"), layer_uuid)
]
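
The new Content-Length handling above only logs a warning when the client-declared length disagrees with what was actually written; the request still succeeds. A standalone sketch of the same parse-and-compare step (hypothetical helper, not code from this commit):

    use axum::http::{header, HeaderMap};
    use tracing::warn;

    // Compare the client-declared Content-Length against the number of
    // bytes actually written to storage and warn on a mismatch.
    fn check_declared_length(headers: &HeaderMap, written_size: usize) {
        if let Some(value) = headers.get(header::CONTENT_LENGTH) {
            // HeaderValue -> &str -> usize; either conversion can fail
            if let Ok(Ok(content_length)) = value.to_str().map(|v| v.parse::<usize>()) {
                if content_length != written_size {
                    warn!("Content-Length {} does not match {} bytes written", content_length, written_size);
                }
            }
        }
    }
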
@@ -122,7 +134,8 @@ pub async fn check_upload_status_get(Path((name, layer_uuid)): Path<(String, Str
StatusCode::CREATED,
[
(header::LOCATION, format!("/v2/{}/blobs/uploads/{}", name, layer_uuid)),
(header::RANGE, format!("0-{}", ending)),
(header::RANGE, format!("0-{}", ending - 1)),
(header::CONTENT_LENGTH, "0".to_string()),
(HeaderName::from_static("docker-upload-digest"), layer_uuid)
]
).into_response())
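
Both upload responses above fix the same off-by-one: the Range header reports an inclusive, zero-based byte range, so after writing ending bytes the last valid offset is ending - 1, not ending, which is likely what tripped up the Kubernetes clients this commit targets. A small sketch of the corrected calculation (hypothetical helper):

    // Build the inclusive Range value returned to the client: "0-<last byte offset>".
    // saturating_sub keeps an empty upload at "0-0" instead of underflowing.
    fn upload_range(ending: usize) -> String {
        format!("0-{}", ending.saturating_sub(1))
    }

    // upload_range(1024) == "0-1023"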

src/main.rs

@@ -75,11 +75,34 @@ async fn main() -> anyhow::Result<()> {
let mut config = Config::new()
.expect("Failure to parse config!");
let mut logging_guards = Vec::new();
// Create a tracing subscriber
if !config.extra_logging {
let sqlite_config = match &config.database {
DatabaseConfig::Sqlite(sqlite) => sqlite,
};
let path = Path::new(&sqlite_config.path);
let path = path.parent().unwrap();
// create file writer
let file_appender = tracing_appender::rolling::never(path, "orca.log");
//let (syslog_nb, _syslog_guard) = tracing_appender::non_blocking(syslog);
let (file_appender_nb, _file_guard) = tracing_appender::non_blocking(file_appender);
let (stdout_nb, _stdout_guard) = tracing_appender::non_blocking(std::io::stdout());
logging_guards.push(_file_guard);
logging_guards.push(_stdout_guard);
// only allow logs from the registry
tracing_subscriber::registry()
.with(tracing_subscriber::fmt::layer())
.with(tracing_subscriber::fmt::layer()
.with_writer(file_appender_nb)
.with_writer(stdout_nb)
//.json()
)
.with(filter::Targets::new()
.with_target("orca_registry", config.log_level)
.with_default(LevelFilter::INFO)
@@ -138,6 +161,8 @@ async fn main() -> anyhow::Result<()> {
let auth_middleware = axum::middleware::from_fn_with_state(state.clone(), auth::check_auth);
let path_middleware = axum::middleware::from_fn(change_request_paths);
//let debug_middleware = axum::middleware::from_fn(extreme_debug_middleware);
let app = Router::new()
.route("/token", routing::get(api::auth::auth_basic_get)
.post(api::auth::auth_basic_post))
@@ -165,6 +190,7 @@ async fn main() -> anyhow::Result<()> {
.layer(auth_middleware) // require auth for ALL v2 routes
)
.with_state(state)
//.layer(debug_middleware)
.layer(TraceLayer::new_for_http());
let layered_app = NormalizePathLayer::trim_trailing_slash().layer(path_middleware.layer(app));
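
The logging_guards vector above exists because tracing_appender::non_blocking returns a WorkerGuard alongside each writer; dropping a guard shuts down its background worker and can lose buffered log lines, so the guards must live as long as main. A condensed sketch of that setup (helper name and layer arrangement are illustrative, not the exact code from this commit):

    use std::path::Path;
    use tracing_appender::non_blocking::WorkerGuard;
    use tracing_subscriber::prelude::*;

    // Non-blocking file + stdout logging; the returned guards must be kept
    // alive by the caller or buffered output is dropped on shutdown.
    fn init_logging(log_dir: &Path) -> Vec<WorkerGuard> {
        let file_appender = tracing_appender::rolling::never(log_dir, "orca.log");
        let (file_writer, file_guard) = tracing_appender::non_blocking(file_appender);
        let (stdout_writer, stdout_guard) = tracing_appender::non_blocking(std::io::stdout());

        tracing_subscriber::registry()
            .with(tracing_subscriber::fmt::layer().with_writer(stdout_writer))
            .with(tracing_subscriber::fmt::layer().with_writer(file_writer))
            .init();

        vec![file_guard, stdout_guard]
    }

Note that calling with_writer twice on a single fmt layer replaces the writer rather than adding a second destination, which is why this sketch uses one layer per writer; MakeWriterExt::and is another way to combine writers.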