Skip to content

Commit

Permalink
feat: Docker exec mode, closes #28
Browse files Browse the repository at this point in the history
  • Loading branch information
mrjackwills committed Nov 16, 2023
1 parent e1998c9 commit c8077bc
Show file tree
Hide file tree
Showing 13 changed files with 393 additions and 268 deletions.
1 change: 1 addition & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -97,6 +97,7 @@ In application controls
| ```( enter )```| execute selected docker command|
| ```( 1-9 )``` | sort containers by heading, clicking on headings also sorts the selected column |
| ```( 0 )``` | stop sorting |
| ```( e )``` | attempt to exec into the selected container |
| ```( h )``` | toggle help menu |
| ```( m )``` | toggle mouse capture - if disabled, text on screen can be selected|
| ```( q )``` | to quit at any time |
Expand Down
22 changes: 18 additions & 4 deletions containerised/Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -45,18 +45,32 @@ RUN cargo build --release --target $(cat /.platform)

RUN cp /usr/src/oxker/target/$(cat /.platform)/release/oxker /

################
## MUSL SETUP ##
################

FROM alpine:3.18 as MUSL_SETUP

RUN apk add --update --no-cache docker-cli upx

# Compress the docker executable, to reduce final image size
RUN upx -9 /usr/bin/docker

#############
## Runtime ##
#############

FROM scratch AS RUNTIME
FROM alpine:3.18 as RUNTIME

# Set an ENV that we're running in a container, so that the application can sleep for 250ms at start
# Set an ENV to indicate that we're running in a container
ENV OXKER_RUNTIME=container

# Copy application binary from builder image
COPY --from=BUILDER /oxker /app/
COPY --from=MUSL_SETUP /usr/bin/docker /usr/bin/

# remove sh and busybox, probably pointless
RUN rm /bin/sh /bin/busybox

# Run the application
# this is used in the application itself, to stop itself from listing itself, so DO NOT EDIT
# this is used in the application itself so DO NOT EDIT
ENTRYPOINT [ "/app/oxker"]
22 changes: 19 additions & 3 deletions containerised/Dockerfile_dev
Original file line number Diff line number Diff line change
@@ -1,12 +1,28 @@
################
## MUSL SETUP ##
################

FROM alpine:3.18 as MUSL_SETUP

RUN apk add --update --no-cache docker-cli upx

# Copy application binary from builder image
RUN upx -9 /usr/bin/docker

#############
## Runtime ##
#############
FROM scratch

# Set env that we're running in a container, so that the application can sleep for 250ms at start
FROM alpine:3.18 as RUNTIME

# Set an ENV that we're running in a container, so that the application can sleep for 250ms at start
ENV OXKER_RUNTIME=container

# Copy application binary from builder image
COPY --from=MUSL_SETUP /usr/bin/docker /usr/bin/


RUN rm /bin/sh /bin/busybox

COPY ./target/x86_64-unknown-linux-musl/release/oxker /app/

# Run the application
Expand Down
6 changes: 3 additions & 3 deletions docker-compose.yml
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ services:
deploy:
resources:
limits:
memory: 128M
memory: 1024M
redis:
image: redis:alpine3.18
container_name: redis
Expand All @@ -27,7 +27,7 @@ services:
deploy:
resources:
limits:
memory: 16M
memory: 384M
rabbitmq:
image: rabbitmq:3
container_name: rabbitmq
Expand All @@ -38,6 +38,6 @@ services:
deploy:
resources:
limits:
memory: 256M
memory: 512M


52 changes: 27 additions & 25 deletions src/app_data/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -563,6 +563,8 @@ impl AppData {
})
});

let id = ContainerId::from(id.as_str());

let is_oxker = i
.command
.as_ref()
Expand All @@ -579,8 +581,6 @@ impl AppData {
.as_ref()
.map_or(String::new(), std::clone::Clone::clone);

let id = ContainerId::from(id.as_str());

let created = i
.created
.map_or(0, |i| u64::try_from(i).unwrap_or_default());
Expand Down Expand Up @@ -624,31 +624,33 @@ impl AppData {
let timestamp = self.args.timestamp;

if let Some(container) = self.get_container_by_id(id) {
container.last_updated = Self::get_systemtime();
let current_len = container.logs.len();

for mut i in logs {
let tz = LogsTz::from(i.as_str());
// Strip the timestamp if `-t` flag set
if !timestamp {
i = i.replace(&tz.to_string(), "");
if !container.is_oxker {
container.last_updated = Self::get_systemtime();
let current_len = container.logs.len();

for mut i in logs {
let tz = LogsTz::from(i.as_str());
// Strip the timestamp if `-t` flag set
if !timestamp {
i = i.replace(&tz.to_string(), "");
}
let lines = if color {
log_sanitizer::colorize_logs(&i)
} else if raw {
log_sanitizer::raw(&i)
} else {
log_sanitizer::remove_ansi(&i)
};
container.logs.insert(ListItem::new(lines), tz);
}
let lines = if color {
log_sanitizer::colorize_logs(&i)
} else if raw {
log_sanitizer::raw(&i)
} else {
log_sanitizer::remove_ansi(&i)
};
container.logs.insert(ListItem::new(lines), tz);
}

// Set the logs selected row for each container
// Either when no long currently selected, or currently selected (before updated) is already at end
if container.logs.state().selected().is_none()
|| container.logs.state().selected().map_or(1, |f| f + 1) == current_len
{
container.logs.end();
// Set the logs selected row for each container
// Either when no long currently selected, or currently selected (before updated) is already at end
if container.logs.state().selected().is_none()
|| container.logs.state().selected().map_or(1, |f| f + 1) == current_len
{
container.logs.end();
}
}
}
}
Expand Down
2 changes: 2 additions & 0 deletions src/app_error.rs
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@ use std::fmt;
#[derive(Debug, Clone, Copy)]
pub enum AppError {
DockerCommand(DockerControls),
DockerExec,
DockerConnect,
DockerInterval,
InputPoll,
Expand All @@ -18,6 +19,7 @@ impl fmt::Display for AppError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
Self::DockerCommand(s) => write!(f, "Unable to {s} container"),
Self::DockerExec => write!(f, "Unable to exec into container"),
Self::DockerConnect => write!(f, "Unable to access docker daemon"),
Self::DockerInterval => write!(f, "Docker update interval needs to be greater than 0"),
Self::InputPoll => write!(f, "Unable to poll user input"),
Expand Down
58 changes: 19 additions & 39 deletions src/docker_data/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,6 @@ pub struct DockerData {
app_data: Arc<Mutex<AppData>>,
args: CliArgs,
binate: Binate,
containerised: bool,
docker: Arc<Docker>,
gui_state: Arc<Mutex<GuiState>>,
is_running: Arc<AtomicBool>,
Expand Down Expand Up @@ -101,6 +100,7 @@ impl DockerData {
spawn_id: SpawnId,
spawns: Arc<Mutex<HashMap<SpawnId, JoinHandle<()>>>>,
) {

let mut stream = docker
.stats(
id.get(),
Expand Down Expand Up @@ -191,7 +191,7 @@ impl DockerData {
.into_iter()
.filter_map(|f| match f.id {
Some(_) => {
if self.containerised
if self.args.in_container
&& f.command
.as_ref()
.map_or(false, |c| c.starts_with(ENTRY_POINT))
Expand Down Expand Up @@ -286,32 +286,12 @@ impl DockerData {
self.app_data.lock().sort_containers();
}

/// Animate the loading icon
fn loading_spin(loading_uuid: Uuid, gui_state: &Arc<Mutex<GuiState>>) -> JoinHandle<()> {
let gui_state = Arc::clone(gui_state);
tokio::spawn(async move {
loop {
tokio::time::sleep(std::time::Duration::from_millis(100)).await;
gui_state.lock().next_loading(loading_uuid);
}
})
}

/// Stop the loading_spin function, and reset gui loading status
fn stop_loading_spin(
gui_state: &Arc<Mutex<GuiState>>,
handle: &JoinHandle<()>,
loading_uuid: Uuid,
) {
handle.abort();
gui_state.lock().remove_loading(loading_uuid);
}

/// Initialize docker container data, before any messages are received
async fn initialise_container_data(&mut self) {
self.gui_state.lock().status_push(Status::Init);
let loading_uuid = Uuid::new_v4();
let loading_spin = Self::loading_spin(loading_uuid, &Arc::clone(&self.gui_state));
let loading_handle = GuiState::start_loading_animation(&self.gui_state, loading_uuid);
// let handle = self.gui_state.lock().st

let all_ids = self.update_all_containers().await;

Expand All @@ -323,7 +303,9 @@ impl DockerData {
while !self.app_data.lock().initialised(&all_ids) {
tokio::time::sleep(std::time::Duration::from_millis(100)).await;
}
Self::stop_loading_spin(&self.gui_state, &loading_spin, loading_uuid);
self.gui_state
.lock()
.stop_loading_animation(&loading_handle, loading_uuid);
self.gui_state.lock().status_del(Status::Init);
}

Expand All @@ -350,61 +332,61 @@ impl DockerData {
match message {
DockerMessage::Pause(id) => {
tokio::spawn(async move {
let loading_spin = Self::loading_spin(uuid, &gui_state);
let handle = GuiState::start_loading_animation(&gui_state, uuid);
if docker.pause_container(id.get()).await.is_err() {
Self::set_error(&app_data, DockerControls::Pause, &gui_state);
}
Self::stop_loading_spin(&gui_state, &loading_spin, uuid);
gui_state.lock().stop_loading_animation(&handle, uuid);
});
self.update_everything().await;
}
DockerMessage::Restart(id) => {
tokio::spawn(async move {
let loading_spin = Self::loading_spin(uuid, &gui_state);
let handle = GuiState::start_loading_animation(&gui_state, uuid);
if docker.restart_container(id.get(), None).await.is_err() {
Self::set_error(&app_data, DockerControls::Restart, &gui_state);
}
Self::stop_loading_spin(&gui_state, &loading_spin, uuid);
gui_state.lock().stop_loading_animation(&handle, uuid);
});
self.update_everything().await;
}
DockerMessage::Start(id) => {
tokio::spawn(async move {
let loading_spin = Self::loading_spin(uuid, &gui_state);
let handle = GuiState::start_loading_animation(&gui_state, uuid);
if docker
.start_container(id.get(), None::<StartContainerOptions<String>>)
.await
.is_err()
{
Self::set_error(&app_data, DockerControls::Start, &gui_state);
}
Self::stop_loading_spin(&gui_state, &loading_spin, uuid);
gui_state.lock().stop_loading_animation(&handle, uuid);
});
self.update_everything().await;
}
DockerMessage::Stop(id) => {
tokio::spawn(async move {
let loading_spin = Self::loading_spin(uuid, &gui_state);
let handle = GuiState::start_loading_animation(&gui_state, uuid);
if docker.stop_container(id.get(), None).await.is_err() {
Self::set_error(&app_data, DockerControls::Stop, &gui_state);
}
Self::stop_loading_spin(&gui_state, &loading_spin, uuid);
gui_state.lock().stop_loading_animation(&handle, uuid);
});
self.update_everything().await;
}
DockerMessage::Unpause(id) => {
tokio::spawn(async move {
let loading_spin = Self::loading_spin(uuid, &gui_state);
let handle = GuiState::start_loading_animation(&gui_state, uuid);
if docker.unpause_container(id.get()).await.is_err() {
Self::set_error(&app_data, DockerControls::Unpause, &gui_state);
}
Self::stop_loading_spin(&gui_state, &loading_spin, uuid);
gui_state.lock().stop_loading_animation(&handle, uuid);
});
self.update_everything().await;
}
DockerMessage::Delete(id) => {
tokio::spawn(async move {
let loading_spin = Self::loading_spin(uuid, &gui_state);
let handle = GuiState::start_loading_animation(&gui_state, uuid);
if docker
.remove_container(
id.get(),
Expand All @@ -419,7 +401,7 @@ impl DockerData {
{
Self::set_error(&app_data, DockerControls::Stop, &gui_state);
}
Self::stop_loading_spin(&gui_state, &loading_spin, uuid);
gui_state.lock().stop_loading_animation(&handle, uuid);
});
self.update_everything().await;
self.gui_state.lock().set_delete_container(None);
Expand All @@ -443,7 +425,6 @@ impl DockerData {
/// Initialise self, and start the message receiving loop
pub async fn init(
app_data: Arc<Mutex<AppData>>,
containerised: bool,
docker: Docker,
docker_rx: Receiver<DockerMessage>,
gui_state: Arc<Mutex<GuiState>>,
Expand All @@ -453,7 +434,6 @@ impl DockerData {
if app_data.lock().get_error().is_none() {
let mut inner = Self {
app_data,
containerised,
args,
binate: Binate::One,
docker: Arc::new(docker),
Expand Down
Loading

0 comments on commit c8077bc

Please sign in to comment.