+ smaller images that should work on more CPU architectures

nicobo 2021-01-17 23:40:30 +01:00
parent 9e15e4f56e
commit 931cfae18d
8 changed files with 407 additions and 156 deletions

GitHub Actions workflow (modified)

@@ -46,48 +46,14 @@ jobs:
username: ${{ secrets.DOCKER_HUB_USERNAME }}
password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}
- name: Build and push debian
id: docker_build_debian
uses: docker/build-push-action@v2
with:
context: ./
file: ./Dockerfile-debian
builder: ${{ steps.buildx.outputs.name }}
platforms: linux/386,linux/amd64
push: true
tags: |
nicolabs/nicobot:debian
cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache
- name: Image digest
run: echo ${{ steps.docker_build_debian.outputs.digest }}
- name: Build and push debian-slim
id: docker_build_debian_slim
uses: docker/build-push-action@v2
with:
context: ./
file: ./Dockerfile-debian-slim
builder: ${{ steps.buildx.outputs.name }}
platforms: linux/amd64
push: true
tags: |
nicolabs/nicobot:debian-slim
cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache
- name: Image digest
run: echo ${{ steps.docker_build_debian_slim.outputs.digest }}
- name: Build and push alpine
id: docker_build_alpine
uses: docker/build-push-action@v2
with:
context: ./
- file: ./Dockerfile-alpine
+ file: ./alpine.Dockerfile
builder: ${{ steps.buildx.outputs.name }}
- platforms: linux/amd64,linux/arm64
+ platforms: linux/arm/v7,linux/arm/v5,linux/arm64,linux/386,linux/amd64
push: true
tags: |
nicolabs/nicobot:alpine
@@ -97,6 +63,40 @@ jobs:
- name: Image digest
run: echo ${{ steps.docker_build_alpine.outputs.digest }}
- name: Build and push debian
id: docker_build_debian
uses: docker/build-push-action@v2
with:
context: ./
file: ./debian.Dockerfile
builder: ${{ steps.buildx.outputs.name }}
platforms: linux/arm/v7,linux/arm/v5,linux/arm64,linux/386,linux/amd64
push: true
tags: |
nicolabs/nicobot:debian
cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache
- name: Image digest
run: echo ${{ steps.docker_build_debian.outputs.digest }}
- name: Build and push debian-signal
id: docker_build_debian_signal
uses: docker/build-push-action@v2
with:
context: ./
file: ./debian-signal.Dockerfile
builder: ${{ steps.buildx.outputs.name }}
platforms: linux/arm/v7,linux/arm/v5,linux/arm64,linux/386,linux/amd64
push: true
tags: |
nicolabs/nicobot:debian-signal
cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache
- name: Image digest
run: echo ${{ steps.docker_build_debian_signal.outputs.digest }}
# https://github.com/marketplace/actions/build-and-push-docker-images#update-dockerhub-repo-description
- name: Update repo description
uses: peter-evans/dockerhub-description@v2
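For reference, a rough local equivalent of one of these steps (a minimal sketch, assuming Docker with the buildx plugin, QEMU binfmt emulators and a Docker Hub login ; builder name is illustrative) :

    # One-time setup : register QEMU emulators so foreign platforms can be built
    docker run --privileged --rm tonistiigi/binfmt --install all
    # Create and select a multi-platform builder
    docker buildx create --name multiarch --use
    # Build and push the alpine variant for the same platform list as the workflow
    docker buildx build \
        --file alpine.Dockerfile \
        --platform linux/arm/v7,linux/arm/v5,linux/arm64,linux/386,linux/amd64 \
        --tag nicolabs/nicobot:alpine \
        --push .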

Dockerfile-debian (deleted)

@@ -1,50 +0,0 @@
FROM python:3 as builder
# Signal installation
WORKDIR /root
# TODO Allow this to be a build variable
ENV SIGNAL_VERSION=0.7.1
RUN wget "https://github.com/AsamK/signal-cli/releases/download/v${SIGNAL_VERSION}/signal-cli-${SIGNAL_VERSION}.tar.gz"
RUN tar xf "signal-cli-${SIGNAL_VERSION}.tar.gz" -C /opt
RUN mv "/opt/signal-cli-${SIGNAL_VERSION}" /opt/signal-cli
# Provides :
# - Python version > 3.4.2
# - bash
# - glibc
FROM python:3
WORKDIR /usr/src/app
RUN apt-get update && \
apt install -y cmake g++ make \
default-jre-headless \
&& \
rm -rf /var/lib/apt/lists/*
# signal-cli files
COPY --from=builder /opt/signal-cli /opt/signal-cli
ENV PATH=/opt/signal-cli/bin:$PATH
# TODO How to do it with one COPY ?
# Or it could be COPY . . with a proper .dockerignore
# Or build the context as a preliminary step
COPY nicobot nicobot/
COPY requirements-runtime.txt .
RUN pip install --no-cache-dir -r requirements-runtime.txt
# It could be packaged (RUN python setup.py sdist bdist_wheel) to possibly
# improve size and speed ; probably as a multistage build
# And update the version from git using setuptools-scm
# But it requires a bit of work
#RUN python setup.py sdist bdist_wheel
# This script allows packaging several bots in the same image
# (to be cleaner they could be in separate images, but they're so close that it's a lot easier to package them together and it does not waste space by duplicating images)
# Otherwise the ENTRYPOINT should simply be [ "python"]
# Made as a separate COPY because it's a docker-specific layer
# (other layers don't need to be re-built if this one changes)
COPY docker/docker-entrypoint.sh .
ENTRYPOINT [ "./docker-entrypoint.sh" ]

Dockerfile-debian-slim (deleted)

@@ -1,54 +0,0 @@
FROM python:3 as builder
# Signal installation
WORKDIR /root
# TODO Allow this to be a build variable
ENV SIGNAL_VERSION=0.7.1
RUN wget "https://github.com/AsamK/signal-cli/releases/download/v${SIGNAL_VERSION}/signal-cli-${SIGNAL_VERSION}.tar.gz"
RUN tar xf "signal-cli-${SIGNAL_VERSION}.tar.gz" -C /opt
RUN mv "/opt/signal-cli-${SIGNAL_VERSION}" /opt/signal-cli
# Provides :
# - Python version > 3.4.2
# - bash
# - glibc
FROM python:3-slim
WORKDIR /usr/src/app
# Or else we get the following error installing the jre :
# update-alternatives: error: error creating symbolic link '/usr/share/man/man1/rmid.1.gz.dpkg-tmp': No such file or directory
RUN mkdir -p /usr/share/man/man1 /usr/share/man/man7
RUN apt-get update && \
apt install -y cmake g++ make \
default-jre-headless \
&& \
rm -rf /var/lib/apt/lists/*
# signal-cli files
COPY --from=builder /opt/signal-cli /opt/signal-cli
ENV PATH=/opt/signal-cli/bin:$PATH
# TODO How to do it with one COPY ?
# Or it could be COPY . . with a proper .dockerignore
# Or build the context as a preliminary step
COPY nicobot nicobot/
COPY requirements-runtime.txt .
RUN pip install --no-cache-dir -r requirements-runtime.txt
# It could be packaged (RUN python setup.py sdist bdist_wheel) to possibly
# improve size and speed ; probably as a multistage build
# And update the version from git using setuptools-scm
# But it requires a bit of work
#RUN python setup.py sdist bdist_wheel
# This script allows packaging several bots in the same image
# (to be cleaner they could be in separate images, but they're so close that it's a lot easier to package them together and it does not waste space by duplicating images)
# Otherwise the ENTRYPOINT should simply be [ "python"]
# Made as a separate COPY because it's a docker-specific layer
# (other layers don't need to be re-built if this one changes)
COPY docker/docker-entrypoint.sh .
ENTRYPOINT [ "./docker-entrypoint.sh" ]

alpine.Dockerfile (new file)

@@ -0,0 +1,93 @@
######################################
# DISCLAIMER
# This image is based on Alpine Linux in the hope of achieving a minimum memory footprint.
# There isn't a consensus on using Alpine with Python :
# - https://pythonspeed.com/articles/alpine-docker-python
# - https://nickjanetakis.com/blog/the-3-biggest-wins-when-using-alpine-as-a-base-docker-image
# However it may help reclaim some MB on low-end computers like Raspberry Pi...
######################################
######################################
# STAGE 1 : Builder image
#
FROM python:3-alpine as builder
# python:3-alpine misses gcc, ffi.h, ...
#
# GCC part :
# https://number1.co.za/alpine-python-docker-base-image-problem-with-gcc/
# https://wiki.alpinelinux.org/wiki/How_to_get_regular_stuff_working
#
# Python cryptography part :
# https://stackoverflow.com/questions/35736598/cannot-pip-install-cryptography-in-docker-alpine-linux-3-3-with-openssl-1-0-2g
# https://github.com/pyca/cryptography/blob/1340c00/docs/installation.rst#building-cryptography-on-linux
# build-base gcc ... : required to build Python dependencies
# openjdk : javac to compile GetSystemProperty.java (to check the value of java.library.path)
# git zip cargo make : to compile libzkgroup
# See also https://blog.logrocket.com/packaging-a-rust-web-service-using-docker/
RUN apk add --no-cache build-base gcc abuild binutils cmake \
# See https://cryptography.io/en/latest/installation.html#alpine for cryptography dependencies
gcc musl-dev python3-dev libffi-dev libressl-dev \
zip make \
# cargo rust \
# git required by setuptools-scm during 'pip install'
git
# Rust is a requirement to build the 'cryptography' Python module
# Installs rust using the recommended 'rustup' method (vs apt-get,
# which seems to be less portable / outdated)
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
WORKDIR /usr/src/app
COPY . .
# This step WILL trigger a compilation on platforms without Python wheels
RUN python3 -m pip install --no-cache-dir --user --upgrade pip && \
python3 -m pip install --no-cache-dir --user -r requirements-runtime.txt .
# Not used currently (we just copy the /root/.local directory which has everything thanks to the --user option)
# Finally put (only runtime) compiled wheels under ./wheels/
# https://pip.pypa.io/en/stable/user_guide/#installation-bundles
#RUN pip wheel -r requirements-runtime.txt . --wheel-dir=wheels
######################################
# STAGE 2 : Final image
#
# The base image must provide :
# - Python version > 3.4.2
# - bash
# - glibc
FROM python:3-alpine
WORKDIR /usr/src/app
# Runtime packages requirements
#
# libressl-dev : seems required for python to locate modules, or for omemo ?
# bash is to use extended syntax in entrypoint.sh (in particular tee >(...))
RUN apk add --no-cache libressl-dev bash
# Not used currently (we just copy the /root/.local directory which has everything thanks to the --user option)
#COPY --from=builder /usr/src/app/wheels ./wheels
#RUN pip install --no-cache-dir --force-reinstall --ignore-installed --upgrade --no-index wheels/*
# https://www.docker.com/blog/containerized-python-development-part-1/
ENV PATH=/root/.local/bin:$PATH
# All Python files, including nicobot's ones
COPY --from=builder /root/.local /root/.local/
# This script allows :
# - packaging several bots in the same image (to be cleaner they could be in
# separate images but they're so close that it's a lot easier to package and
# does not waste space by duplicating layers)
# - also adds extra command line options for Signal device linking
# Otherwise the ENTRYPOINT would simply be [ "python"]
COPY docker/docker-entrypoint.sh .
ENTRYPOINT [ "./docker-entrypoint.sh" ]
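A minimal local smoke test of this image, assuming a single-platform build for the host (tag is illustrative) :

    docker build -f alpine.Dockerfile -t nicobot:alpine-test .
    # The entrypoint selects the bot by name ; '-h' should print its help
    docker run --rm nicobot:alpine-test askbot -h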

debian-signal.Dockerfile (new file)

@@ -0,0 +1,116 @@
############################
# STAGE 1
#
# Getting around this bug : https://github.com/docker/buildx/issues/395
# > warning: spurious network error (2 tries remaining): could not read directory '/root/.cargo/registry/index/github.com-1ecc6299db9ec823/.git//refs': Value too large for defined data type; class=Os (2)
#
# Downloads files into this temporary image, including .cargo/*
#
#ARG BUILDPLATFORM
FROM --platform=$BUILDPLATFORM rust:1.49-buster AS rust_fix
RUN apt-get update && \
apt-get install -y git
RUN git clone https://github.com/signalapp/zkgroup.git /usr/src/zkgroup
WORKDIR /usr/src/zkgroup
ENV USER=root
RUN mkdir -p .cargo \
&& cargo vendor > .cargo/config
######################################
# STAGE 2
#
# Builder for signal-cli & libzkgroup, its native dependency
#
FROM rust:1.49-buster as signal_builder
ARG TARGETPLATFORM
ARG signal_version=0.7.1
# Buggy tzdata installation : https://serverfault.com/questions/949991/how-to-install-tzdata-on-a-ubuntu-docker-image
ARG TZ=Europe/Paris
RUN apt-get update
RUN apt-get install -y \
# rustc must be > 1.36 or libzkgroup build will fail
# jfsutils : to create a filesystem as a workaround for the buildx bug above
# wget does not recognize GitHub's certificates, so curl is used instead
git zip curl tar cargo rustc make \
# seems missing on ubuntu images
ca-certificates
#python3 python3-pip && \
RUN update-ca-certificates
# Signal unpacking
WORKDIR /root
ENV SIGNAL_VERSION=${signal_version}
RUN curl -L -o signal-cli.tar.gz "https://github.com/AsamK/signal-cli/releases/download/v${SIGNAL_VERSION}/signal-cli-${SIGNAL_VERSION}.tar.gz"
RUN tar xf "signal-cli.tar.gz" -C /opt
RUN mv "/opt/signal-cli-${SIGNAL_VERSION}" /opt/signal-cli
# Compiles (or downloads) the native libzkgroup library for signal-cli
# See https://github.com/AsamK/signal-cli/wiki/Provide-native-lib-for-libsignal
COPY docker/libzkgroup libzkgroup
COPY --from=rust_fix /usr/src/zkgroup libzkgroup/zkgroup
WORKDIR libzkgroup
# This script tries to download precompiled binaries before falling back to compilation
RUN ./build.sh
# Copies libzkgroup where it belongs
WORKDIR ${TARGETPLATFORM}
# TODO Use option a ; it allows running this step before the signal-cli installation
# and doesn't touch the signal-cli files
# Option a : Removes the classic library from the JAR (the alpine-compatible one has to be put somewhere in java.library.path)
RUN zip -d /opt/signal-cli/lib/zkgroup-java-*.jar libzkgroup.so
# Option b : Replaces the classic library directly inside the JAR with the compiled one
# Maybe less clean but also simpler in the second build stage
# RUN zip -d /opt/signal-cli/lib/zkgroup-java-*.jar libzkgroup.so && \
# zip /opt/signal-cli/lib/zkgroup-java-*.jar libzkgroup.*
######################################
# STAGE 3
#
# Base image (with Signal)
#
# TODO Since this image now also derives from python:3, make it a separate Dockerfile
# that inherits from the default nicobot (without signal support)
#
FROM nicolabs/nicobot:debian
ARG TARGETPLATFORM
LABEL signal="true"
# apt-utils : not strictly required, but apt warns when it is missing
# lsb_release is required by pip and not present on slim + ARM images
RUN apt-get update && \
apt-get install --reinstall -y apt-utils lsb-release && \
rm -rf /var/lib/apt/lists/*
# Java installation : copying JRE files from the official images has proven
# to be quite portable & smaller than via package installation.
# The tricky thing is to make sure to get all required files from the source image.
# Luckily this means only 3 directories here...
# TODO Better prepare this in the builder by following all symlinks
# and gathering all target files
COPY --from=openjdk:11-jre-slim-stretch /etc/ssl/certs/java /etc/ssl/certs/java
COPY --from=openjdk:11-jre-slim-stretch /etc/java-11-openjdk /etc/java-11-openjdk
COPY --from=openjdk:11-jre-slim-stretch /docker-java-home /opt/java
ENV JAVA_HOME=/opt/java
ENV PATH=${JAVA_HOME}/bin:${PATH}
# basic smoke test
RUN java --version
# The 'qr' command is used in the process of linking the machine with a Signal account
RUN python3 -m pip install --no-cache-dir --user --upgrade pip && \
python3 -m pip install --no-cache-dir --user qrcode[pil]
# signal-cli files
COPY --from=signal_builder /opt/signal-cli /opt/signal-cli
COPY --from=signal_builder /root/libzkgroup/${TARGETPLATFORM}/libzkgroup.* /opt/java/lib/
ENV PATH=/opt/signal-cli/bin:${PATH}
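To check where the JVM will look for native libraries such as libzkgroup, java.library.path can be inspected at runtime without compiling anything. A hedged sketch, assuming the image was built as nicolabs/nicobot:debian-signal :

    docker run --rm --entrypoint java nicolabs/nicobot:debian-signal \
        -XshowSettings:properties -version 2>&1 | grep 'java.library.path'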

debian.Dockerfile (new file)

@@ -0,0 +1,66 @@
######################################
# STAGE 1 : Builder image
#
FROM python:3 as builder
RUN apt-get update
RUN apt-get install -y \
# "make" tools required to compile the Python modules
# not all may be required on all platforms...
cmake g++ make \
# More dependencies for the 'cryptography' module
# See https://cryptography.io/en/latest/installation.html#debian-ubuntu
build-essential libssl-dev libffi-dev python3-dev \
# git required by setuptools-scm during 'pip install'
git
# Rust is a requirement to build the 'cryptography' Python module
# Installs rust using the recommended 'rustup' method (vs apt-get,
# which seems to be less portable / outdated)
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
WORKDIR /usr/src/app
COPY . .
# This step WILL trigger a compilation on platforms without Python wheels
RUN python3 -m pip install --no-cache-dir --user --upgrade pip && \
python3 -m pip install --no-cache-dir --user -r requirements-runtime.txt .
# Not used currently (we just copy the /root/.local directory which has everything thanks to the --user option)
# Finally put (only runtime) compiled wheels under ./wheels/
# https://pip.pypa.io/en/stable/user_guide/#installation-bundles
#RUN pip wheel -r requirements-runtime.txt . --wheel-dir=wheels
######################################
# STAGE 2 : Final image
#
# The base image must provide :
# - Python version > 3.4.2
# - bash
# - glibc
FROM python:3-slim
WORKDIR /usr/src/app
# Not used currently (we just copy the /root/.local directory which has everything thanks to the --user option)
#COPY --from=builder /usr/src/app/wheels ./wheels
#RUN pip install --no-cache-dir --force-reinstall --ignore-installed --upgrade --no-index wheels/*
# https://www.docker.com/blog/containerized-python-development-part-1/
ENV PATH=/root/.local/bin:$PATH
# All Python files, including nicobot's ones
COPY --from=builder /root/.local /root/.local/
# This script allows :
# - packaging several bots in the same image (to be cleaner they could be in
# separate images but they're so close that it's a lot easier to package and
# does not waste space by duplicating layers)
# - also adds extra command line options for Signal device linking
# Otherwise the ENTRYPOINT would simply be [ "python"]
COPY docker/docker-entrypoint.sh .
ENTRYPOINT [ "./docker-entrypoint.sh" ]
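Note that debian-signal.Dockerfile above builds FROM nicolabs/nicobot:debian, so this image must exist (locally or on the registry) before it. A minimal sketch of the local build order, host platform only :

    docker build -f debian.Dockerfile -t nicolabs/nicobot:debian .
    docker build -f debian-signal.Dockerfile -t nicolabs/nicobot:debian-signal .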

docker/docker-entrypoint.sh (modified)

@@ -1,29 +1,34 @@
#!/bin/bash
-usage()
-{
+usage() {
cat << EOF
-Usage : $0 [bot's name] [bot's arguments]
+Usage : $0 <bot's name> [--signal-register <device name>]
+ [--qrcode-options <qr options>]
+ [bot's regular arguments]
-Available bots :
-- askbot
-- transbot
Arguments :
<bot's name> One of 'askbot' or 'transbot'.
--signal-register <device name> Will display a QR Code to scan & register with
an existing Signal account. <device name> is a
string to identify the docker container as a
signal device.
--qrcode-options <qr options> Additional options (in one string) to the 'qr'
command. The QR Code can be printed directly
to the console without using this argument but
make sure to pass '-it' to 'docker run'.
See github.com/lincolnloop/python-qrcode.
[bot's regular arguments] All arguments that can be passed to the bot.
See github.com/nicolabs/nicobot.
E.g. '$0 transbot -h' to get a more specific help for 'transbot'
EOF
}
-# Displays an URL and a QRCode in the console to link the current container
-# with whichever Signal client will scan it
-signal_link() {
-device_name=$1
-# WARNING This command works on alpine with bash installed, not tested otherwise
-signal-cli link --name "${device_name}" | tee >(head -1 | qr)
-}
# Default values
opt_signal_register=
opt_qrcode_options=
opt_bot=
# Parses the command line for options to execute before running the bot
@@ -35,6 +40,9 @@ while true; do
(--signal-register)
opt_signal_register=$2
shift 2;;
(--qrcode-options)
opt_qrcode_options=$2
shift 2;;
(askbot|transbot)
opt_bot=$1
shift;;
@@ -46,15 +54,22 @@ done
# Registers the device with signal
if [ -n "${opt_signal_register}" ]; then
-signal_link "${opt_signal_register}"
+# Displays a URL and a QR code in the console to link the current container
+# with whichever Signal client will scan it.
+# NOTES :
+# - This syntax requires bash.
+# - It seems this command does not return a 0 status even when the operation succeeds
+signal-cli link --name "${opt_signal_register}" | tee >(head -1 | qr ${opt_qrcode_options})
fi
# Runs the right bot with the remaining args
case "${opt_bot}" in
askbot|transbot)
-exec python3 -m "nicobot.${opt_bot}" "$@"
+#exec python3 -m "nicobot.${opt_bot}" "$@"
+exec "${opt_bot}" "$@"
;;
*)
echo "Unknown bot : '*{opt_bot}'" >2
usage
exit 1
;;
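A hedged usage sketch of this entrypoint, assuming the debian-signal image built above ; the device name is illustrative and '-it' is required so the QR code renders in the terminal :

    docker run -it --rm nicolabs/nicobot:debian-signal \
        transbot --signal-register my-docker-bot --qrcode-options '--error-correction=M'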

docker/libzkgroup/build.sh (new file, executable)

@@ -0,0 +1,65 @@
#!/bin/sh
# There are numerous problems building libzkgroup with Docker, and it's slow.
# This script attempts to download precompiled binaries first.
# In the end the downloaded file will be in a directory named after $TARGETPLATFORM (defined by docker).
# E.g. 'linux/arm/v7/libzkgroup.so' or 'windows/amd64/libzkgroup.dll'
# If download fails, some local files may be available as a fallback (but they may be outdated as they are generated manually).
# If everything else fails, the library is compiled from source, which can take a very long time and may not work on all platforms.
ZKGROUP_FORCE_COMPILE=
ZKGROUP_VERSION=0.7.1
[ "$1" != "" ] && ZGROUP_VERSION=$1
if [ -z "${ZKGROUP_FORCE_COMPILE}" ]; then
# First, tries downloading it
case ${TARGETPLATFORM} in
"linux/amd64")
# This binary should already be provided by the zkgroup project
mkdir -p "${TARGETPLATFORM}"
curl -L -o "${TARGETPLATFORM}/libzkgroup.so" "https://github.com/signalapp/zkgroup/releases/download/v${ZKGROUP_VERSION}/libzkgroup.so"
;;
"windows/amd64")
# This binary should already be provided by the zkgroup project
mkdir -p "${TARGETPLATFORM}"
curl -L -o "${TARGETPLATFORM}/libzkgroup.dll" "https://github.com/signalapp/zkgroup/releases/download/v${ZKGROUP_VERSION}/libzkgroup.dll"
;;
"linux/arm64")
# This binary may already be provided within signal-cli-rest-api
mkdir -p "${TARGETPLATFORM}"
curl -L -o "${TARGETPLATFORM}/libzkgroup.so" "https://github.com/bbernhard/signal-cli-rest-api/raw/master/ext/libraries/zkgroup/v${ZKGROUP_VERSION}/arm64/libzkgroup.so"
;;
"linux/arm/v7")
# This binary may already be provided within signal-cli-rest-api
mkdir -p "${TARGETPLATFORM}"
curl -L -o "${TARGETPLATFORM}/libzkgroup.so" "https://github.com/bbernhard/signal-cli-rest-api/raw/master/ext/libraries/zkgroup/v${ZKGROUP_VERSION}/armv7/libzkgroup.so"
;;
esac
# Checks that there is a file at the destination path
if [ "$(find "${TARGETPLATFORM}" -name 'libzkgroup.*' 2>/dev/null | wc -l)" -ge 1 ]; then
echo "Found existing binary :"
find "${TARGETPLATFORM}" -name 'libzkgroup.*'
# 'exit' (not 'return') because this script is executed, not sourced
exit 0
fi
# End if -z "${ZKGROUP_FORCE_COMPILE}"
fi
# Else, compiles libzkgroup
# This is by far the most risky and longest option...
# It requires : git, curl, make, rust
# Because of https://github.com/docker/buildx/issues/395 we need the files to be
# provided and the build to be offline
[ -d zkgroup ] || git clone https://github.com/signalapp/zkgroup.git zkgroup
cd zkgroup
if [ -d vendor ]; then
RUSTFLAGS='-C link-arg=-s' cargo build --release --offline
else
make libzkgroup
fi
cd ..
mkdir -p "${TARGETPLATFORM}"
mv zkgroup/target/release/libzkgroup.* "${TARGETPLATFORM}"
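A minimal sketch of running this script by hand, outside Docker, where buildx would normally provide TARGETPLATFORM (values are illustrative) :

    TARGETPLATFORM=linux/arm/v7 ./build.sh 0.7.1
    # The downloaded or compiled binary should now be under the platform directory
    find linux/arm/v7 -name 'libzkgroup.*'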