+ signal dependencies (JDK, libzkgroup)

This commit is contained in:
nicobo 2020-12-23 23:27:08 +01:00
parent 99b66cfdb8
commit 465e323e5b
No known key found for this signature in database
GPG key ID: 2581E71C5FA5285F
7 changed files with 148 additions and 33 deletions

View file

@ -9,6 +9,8 @@
FROM python:3-alpine AS builder
WORKDIR /root
# python:3-alpine misses gcc, ffi.h, ...
#
# GCC part :
@ -19,10 +21,42 @@ FROM python:3-alpine AS builder
# https://stackoverflow.com/questions/35736598/cannot-pip-install-cryptography-in-docker-alpine-linux-3-3-with-openssl-1-0-2g
# https://github.com/pyca/cryptography/blob/1340c00/docs/installation.rst#building-cryptography-on-linux
# Required to build
# build-base gcc ... : required to build Python dependencies
# openjdk : javac to compile GetSystemProperty.java (to check the value of java.library.path)
# git zip cargo make : to compile libzkgroup
RUN apk add --no-cache build-base gcc abuild binutils cmake \
libressl-dev musl-dev libffi-dev
libressl-dev musl-dev libffi-dev \
openjdk11 \
git zip cargo make
WORKDIR /root
RUN git clone https://github.com/signalapp/zkgroup.git
WORKDIR /root/zkgroup
RUN make libzkgroup
# A helper tool to get Java's library path (not automated, just for manual checks)
WORKDIR /root
ENV PATH=/usr/lib/jvm/java-11-openjdk/bin:$PATH
COPY docker/GetSystemProperty.java .
RUN javac -cp . GetSystemProperty.java
RUN java GetSystemProperty java.library.path > /root/java.library.path.txt
# Signal installation
WORKDIR /root
# TODO Allow this to be a build variable
ENV SIGNAL_VERSION=0.7.1
RUN wget "https://github.com/AsamK/signal-cli/releases/download/v${SIGNAL_VERSION}/signal-cli-${SIGNAL_VERSION}.tar.gz"
RUN tar xf "signal-cli-${SIGNAL_VERSION}.tar.gz" -C /opt
RUN mv "/opt/signal-cli-${SIGNAL_VERSION}" /opt/signal-cli
# Compiles the native libzkgroup dependency for alpine/musl libc
# See https://github.com/AsamK/signal-cli/wiki/Provide-native-lib-for-libsignal
# Option a : Removes the classic library from the JAR (the alpine-compatible one has to be put somewhere in java.library.path)
#RUN zip -d /opt/signal-cli/lib/zkgroup-java-*.jar libzkgroup.so
# Option b : Replaces the classic library directly inside the JAR with the compiled one
# Maybe less clean but also simpler in the second build stage
RUN jar -uf /opt/signal-cli/lib/zkgroup-java-*.jar -C /root/zkgroup/target/release libzkgroup.so
WORKDIR /usr/src/app
COPY requirements-runtime.txt .
RUN pip install --no-cache-dir --user -r requirements-runtime.txt
# The 'qr' command is used in the process to link the machine with a Signal account
@ -33,12 +67,6 @@ RUN pip install --no-cache-dir --user qrcode
# But it requires a bit of work
#RUN python setup.py sdist bdist_wheel
# Signal installation
ENV SIGNAL_VERSION=0.6.7
RUN wget "https://github.com/AsamK/signal-cli/releases/download/v${SIGNAL_VERSION}/signal-cli-${SIGNAL_VERSION}.tar.gz"
RUN tar xf "signal-cli-${SIGNAL_VERSION}.tar.gz" -C /opt
RUN mv "/opt/signal-cli-${SIGNAL_VERSION}" /opt/signal-cli
#################
@ -49,28 +77,35 @@ FROM python:3-alpine
WORKDIR /usr/src/app
# Required at runtime
# Runtime packages requirements
#
# libressl-dev : seems required for python to locate modules, or for omemo ?
#
# openjdk : requirement for signal-cli
# A Java 8+ runtime seems to be required for 0.6, 0.7 requires JRE 11 (which is 50MB bigger...)
# For an even smaller JRE image, see maybe https://github.com/rhuss/docker-java-jolokia/blob/master/base/alpine/jre/8/Dockerfile
# or https://hub.docker.com/r/azul/zulu-openjdk-alpine/dockerfile
#
# bash is to use extended syntax in entrypoint.sh (in particular tee >(...))
RUN apk add --no-cache libressl-dev
RUN apk add --no-cache bash openjdk8-jre
#
# rust brings the runtime requirements for the zkgroup library (for signal-cli)
# TODO rust (or cargo) greatly increases the size of the image : identify the minimal requirements
# See https://blog.logrocket.com/packaging-a-rust-web-service-using-docker/
RUN apk add --no-cache libressl-dev bash openjdk11-jre rust
# NOTE The above requirements of JRE and rust totally ruins the point
# of using alpine to build small images...
# All Python files, including nicobot's
COPY --from=builder /root/.local /root/.local/
# https://www.docker.com/blog/containerized-python-development-part-1/
ENV PATH=/root/.local/bin:$PATH
#COPY --from=builder /root/.cache /root/.cache/
#COPY --from=builder /usr/local/lib /usr/local/lib/
# signal-cli files
COPY --from=builder /opt/signal-cli /opt/signal-cli
ENV PATH=/opt/signal-cli/bin:$PATH
# TODO How to do it with one COPY ?
# Or it could be COPY . . with a proper .dockerignore
# Or build the context as a preliminary step
COPY nicobot nicobot/
@ -80,5 +115,5 @@ COPY nicobot nicobot/
# Otherwise the ENTRYPOINT should simply be [ "python"]
# Made as a separate COPY because it's a docker-specific layer
# (other layers don't need to be re-built if this one changes)
COPY docker-entrypoint.sh .
COPY docker/docker-entrypoint.sh .
ENTRYPOINT [ "./docker-entrypoint.sh" ]

View file

@ -1,11 +1,33 @@
FROM python:3 as builder
# Signal installation
WORKDIR /root
# TODO Allow this to be a build variable
ENV SIGNAL_VERSION=0.7.1
RUN wget "https://github.com/AsamK/signal-cli/releases/download/v${SIGNAL_VERSION}/signal-cli-${SIGNAL_VERSION}.tar.gz"
RUN tar xf "signal-cli-${SIGNAL_VERSION}.tar.gz" -C /opt
RUN mv "/opt/signal-cli-${SIGNAL_VERSION}" /opt/signal-cli
# Provides :
# - Python version > 3.4.2
# - bash
# - glibc
# Python version > 3.4.2
FROM python:3
WORKDIR /usr/src/app
RUN apt-get update && \
apt install -y cmake g++ make && \
apt install -y cmake g++ make \
default-jre-headless \
&& \
rm -rf /var/lib/apt/lists/*
WORKDIR /usr/src/app
# signal-cli files
COPY --from=builder /opt/signal-cli /opt/signal-cli
ENV PATH=/opt/signal-cli/bin:$PATH
# TODO How to do it with one COPY ?
# Or it could be COPY . . with a proper .dockerignore
@ -25,5 +47,5 @@ RUN pip install --no-cache-dir -r requirements-runtime.txt
# Otherwise the ENTRYPOINT should simply be [ "python"]
# Made as a separate COPY because it's a docker-specific layer
# (other layers don't need to be re-built if this one changes)
COPY docker-entrypoint.sh .
COPY docker/docker-entrypoint.sh .
ENTRYPOINT [ "./docker-entrypoint.sh" ]

View file

@ -1,11 +1,36 @@
# Python version > 3.4.2
FROM python:3 as builder
# Signal installation
WORKDIR /root
# TODO Allow this to be a build variable
ENV SIGNAL_VERSION=0.7.1
RUN wget "https://github.com/AsamK/signal-cli/releases/download/v${SIGNAL_VERSION}/signal-cli-${SIGNAL_VERSION}.tar.gz"
RUN tar xf "signal-cli-${SIGNAL_VERSION}.tar.gz" -C /opt
RUN mv "/opt/signal-cli-${SIGNAL_VERSION}" /opt/signal-cli
# Provides :
# - Python version > 3.4.2
# - bash
# - glibc
FROM python:3-slim
WORKDIR /usr/src/app
# Or else we get the following error installing the jre :
# update-alternatives: error: error creating symbolic link '/usr/share/man/man1/rmid.1.gz.dpkg-tmp': No such file or directory
RUN mkdir /usr/share/man/man1 /usr/share/man/man7
RUN apt-get update && \
apt install -y cmake g++ make && \
apt install -y cmake g++ make \
default-jre-headless \
&& \
rm -rf /var/lib/apt/lists/*
WORKDIR /usr/src/app
# signal-cli files
COPY --from=builder /opt/signal-cli /opt/signal-cli
ENV PATH=/opt/signal-cli/bin:$PATH
# TODO How to do it with one COPY ?
# Or it could be COPY . . with a proper .dockerignore
@ -25,5 +50,5 @@ RUN pip install --no-cache-dir -r requirements-runtime.txt
# Otherwise the ENTRYPOINT should simply be [ "python"]
# Made as a separate COPY because it's a docker-specific layer
# (other layers don't need to be re-built if this one changes)
COPY docker-entrypoint.sh .
COPY docker/docker-entrypoint.sh .
ENTRYPOINT [ "./docker-entrypoint.sh" ]

View file

@ -31,15 +31,15 @@ There are several [Docker](https://docker.com) images available, with the follow
- **debian** : if you have several images with the debian base, this may be the most efficient (as base layers will be shared with other images)
- **debian-slim** : if you want a smaller-sized image and you don't run other images based on debian (as it will not share as much layers as with the above `debian` tag)
- **alpine** : this is the smallest image (<100MB) but it may have more bugs than debian ones because it's more complex to maintain
- **alpine** : this should be the smallest image in theory, but it's more complex to maintain and therefore might not meet this expectation ; please check/test before use
Since those bots are probably not going to be enterprise-level critical at any point, I suggest you use the _alpine_ image and switch to _debian_ or _debian-slim_ if you encounter performance issues or other problems.
**NOTE** that the _signal-cli_ backend needs a _Java_ runtime environment, and also _rust_ dependencies to support Signal's group V2. This approximately doubles the size of the images...
Those images should be able to run on all CPU architectures supported by [the base images](https://hub.docker.com/_/python).
The current state of those images is such that I suggest you try the _debian-slim_ image first and switch to another one if you encounter issues or have a specific use case to solve.
Sample command to run :
Sample run command :
docker run --rm -it -v "myconfdir:/etc/nicobot" nicolabs/nicobot:alpine transbot -C /etc/nicobot
docker run --rm -it -v "myconfdir:/etc/nicobot" nicolabs/nicobot:debian-slim transbot -C /etc/nicobot
### Installation from source
@ -295,14 +295,25 @@ There are several Dockerfile, each made for specific use cases (see [Docker-usag
`Dockerfile-debian` and `Dockerfile-debian-slim` are quite straight and very similar.
`Dockerfile-alpine` is a multi-stage build because most of the Python dependencies need to be compiled first.
The first stage builds the libraries and the second stage just imports them without all the build tools.
The result is a far smaller image.
`Dockerfile-alpine` requires a [multi-stage build](https://docs.docker.com/develop/develop-images/multistage-build/) because most of the Python dependencies need to be compiled first.
The result is a far smaller image than if we had all the compiling/building tools embedded.
There is no special requirement to build those images ; sample build & run commands :
Those images are limited to CPU architectures :
- supported by [the base images](https://hub.docker.com/_/python)
- for which the Python dependencies are built or able to build
- for which the native dependencies of signal (libzkgroup) can be built (alpine only)
docker build -t nicobot:alpine -f Dockerfile-alpine .
docker run --rm -it -v "$(pwd)/tests:/etc/nicobot" nicolabs/nicobot:alpine askbot -c /etc/nicobot/askbot-sample-conf/config.yml
Simple build command (single architecture) :
docker build -t nicolabs/nicobot:debian-slim -f Dockerfile-debian-slim .
Sample buildx command (multi-arch) :
docker buildx build --platform linux/386,linux/amd64,linux/arm/v6,linux/arm/v7,linux/arm64,linux/ppc64le,linux/s390x -t nicolabs/nicobot:debian-slim -f Dockerfile-debian-slim .
Then run with the provided sample configuration :
docker run --rm -it -v "$(pwd)/tests:/etc/nicobot" nicolabs/nicobot:debian-slim askbot -c /etc/nicobot/askbot-sample-conf/config.yml
Github actions are actually configured (see [dockerhub.yml](.github/workflows/dockerhub.yml)) to automatically build and push the images to Docker Hub so they are available whenever commits are pushed to the _master_ branch.

View file

@ -0,0 +1,21 @@
import java.util.Properties;
import java.util.Enumeration;
/**
 * Prints Java system properties.
 *
 * With no arguments, dumps every system property as one "key : value" line.
 * With arguments, prints the value of each named property, one per line
 * ("null" for unknown keys). Used in the Docker build to inspect
 * java.library.path.
 */
public class GetSystemProperty {

    public static void main(String[] args) {
        if (args.length == 0) {
            // Dump all properties. stringPropertyNames() is type-safe,
            // unlike the raw Enumeration + (String) casts it replaces,
            // which could throw ClassCastException on non-String values.
            Properties p = System.getProperties();
            for (String key : p.stringPropertyNames()) {
                System.out.println(key + " : " + p.getProperty(key));
            }
        }
        else {
            // Print only the requested properties.
            for (String key : args) {
                System.out.println(System.getProperty(key));
            }
        }
    }
}

View file

@ -0,0 +1 @@
/usr/lib/jvm/java-11-openjdk/lib/server:/usr/lib/jvm/java-11-openjdk/lib:/usr/lib/jvm/java-11-openjdk/../lib:/usr/java/packages/lib:/usr/lib64:/lib64:/lib:/usr/lib