Compare commits

3 Commits

Author SHA1 Message Date
3532401032 not loading model at startup 2025-12-04 19:41:38 +01:00
f0035f0c26 Downloading lmstudio on build 2025-12-04 11:56:15 +01:00
90fd9cdfad ENV later 2025-12-03 18:31:58 +01:00
2 changed files with 6 additions and 9 deletions

View File

@@ -33,18 +33,17 @@ eot
RUN <<eot
echo 'export PATH="${PATH}:/root/.local/bin"' >>/root/.bashrc
echo 'export LLAMA_THREADS=12' >> ~/.bashrc
echo 'export DISPLAY=:99' >> ~/.bashrc
echo 'alias lms="~/.lmstudio/bin/lms"' >> ~/.bashrc
eot
ENV DISPLAY=:99
# Not sure that this works
ENV LLAMA_THREADS=12
#########################
FROM baseimage AS final
ADD ./LM-Studio* /data/lms/
ADD https://lmstudio.ai/download/latest/linux/x64 /data/lms/LMStudio.AppImage
ADD ./http-server-config.json /http-server-config.json
RUN <<eot
@@ -62,6 +61,9 @@ ADD --chmod=0755 ./docker-healthcheck.sh /usr/local/bin/
#HEALTHCHECK --interval=1m --timeout=10s --start-period=1m \
# CMD /bin/bash /usr/local/bin/docker-healthcheck.sh || exit 1
ENV DISPLAY=:99
# Not sure that this works
ENV LLAMA_THREADS=12
# Run the server
# CMD ["sh", "-c", "tail -f /dev/null"] # For development: keep container open

View File

@@ -15,11 +15,6 @@ sleep 2
sleep 30
~/.lmstudio/bin/lms server start --cors &
sleep 5
# ~/.cache/lm-studio/bin/lms get ${MODEL_PATH}
~/.lmstudio/bin/lms load --ttl 3600 --context-length ${CONTEXT_LENGTH:-16384} ${MODEL_IDENTIFIER:-openai/gpt-oss-20b} &
sleep 20
cp -f /http-server-config.json /root/.lmstudio/.internal/http-server-config.json
x11vnc -display :99 -forever -rfbauth /root/.vnc/passwd -quiet -listen 0.0.0.0 -xkb