adding pipeline logic and multi arching

thelamer
2019-01-16 01:51:19 -08:00
parent 6e0fc5e84c
commit a99f76a006
7 changed files with 1535 additions and 4 deletions

Dockerfile

@@ -7,6 +7,7 @@ ARG XMLTV_VER="0.5.69"
# environment settings
ARG TZ="Europe/Oslo"
ARG TVHEADEND_COMMIT
ENV HOME="/config"
# copy patches
@@ -29,6 +30,7 @@ RUN \
gettext-dev \
git \
gzip \
jq \
libcurl \
libdvbcsa-dev \
libgcrypt-dev \
@@ -144,7 +146,18 @@ RUN \
RUN \
echo "**** compile tvheadend ****" && \
git clone https://github.com/tvheadend/tvheadend.git /tmp/tvheadend && \
if [ -z ${TVHEADEND_COMMIT+x} ]; then \
TVHEADEND_COMMIT=$(curl -sX GET https://api.github.com/repos/tvheadend/tvheadend/commits/master \
| jq -r '. | .sha'); \
fi && \
curl -o \
/tmp/tvheadend.tar.gz -L \
"https://github.com/tvheadend/tvheadend/archive/${TVHEADEND_COMMIT}.tar.gz" && \
mkdir -p \
/tmp/tvheadend && \
tar xf \
/tmp/tvheadend.tar.gz -C \
/tmp/tvheadend/ --strip-components=1 && \
cd /tmp/tvheadend && \
./configure \
`#Encoding` \

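The hunk above makes the tvheadend checkout reproducible: when TVHEADEND_COMMIT is supplied as a build argument, the tarball for that exact commit is fetched; otherwise jq pulls the latest master SHA from the GitHub API. A minimal sketch of both invocations (the SHA below is a placeholder, not a real tvheadend commit):

```
# Pin the build to a specific upstream commit (placeholder SHA)
docker build \
  --build-arg TVHEADEND_COMMIT=0123456789abcdef0123456789abcdef01234567 \
  -t tvheadend:pinned .

# Omit the argument and the Dockerfile resolves the latest master commit itself
docker build -t tvheadend:master .
```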
Dockerfile.aarch64 Normal file

@@ -0,0 +1,344 @@
FROM lsiobase/alpine.arm64:3.8 as buildstage
############## build stage ##############
# package versions
ARG ARGTABLE_VER="2.13"
ARG XMLTV_VER="0.5.69"
# environment settings
ARG TZ="Europe/Oslo"
ARG TVHEADEND_COMMIT
ENV HOME="/config"
# copy patches
COPY patches/ /tmp/patches/
RUN \
echo "**** install build packages ****" && \
apk add --no-cache \
autoconf \
automake \
bsd-compat-headers \
bzip2 \
cmake \
curl \
ffmpeg-dev \
file \
findutils \
g++ \
gcc \
gettext-dev \
git \
gzip \
jq \
libcurl \
libdvbcsa-dev \
libgcrypt-dev \
libhdhomerun-dev \
libtool \
libva-dev \
libvpx-dev \
libxml2-dev \
libxslt-dev \
linux-headers \
make \
openssl-dev \
opus-dev \
patch \
pcre2-dev \
perl-archive-zip \
perl-boolean \
perl-capture-tiny \
perl-cgi \
perl-compress-raw-zlib \
perl-data-dumper \
perl-date-manip \
perl-datetime \
perl-datetime-format-strptime \
perl-datetime-timezone \
perl-dbd-sqlite \
perl-dbi \
perl-dev \
perl-digest-sha1 \
perl-doc \
perl-file-slurp \
perl-file-temp \
perl-file-which \
perl-getopt-long \
perl-html-parser \
perl-html-tree \
perl-http-cookies \
perl-io \
perl-io-compress \
perl-io-html \
perl-io-socket-ssl \
perl-io-stringy \
perl-json \
perl-libwww \
perl-lingua-en-numbers-ordinate \
perl-lingua-preferred \
perl-list-moreutils \
perl-module-build \
perl-module-pluggable \
perl-net-ssleay \
perl-parse-recdescent \
perl-path-class \
perl-scalar-list-utils \
perl-term-progressbar \
perl-term-readkey \
perl-test-exception \
perl-test-requires \
perl-timedate \
perl-try-tiny \
perl-unicode-string \
perl-xml-libxml \
perl-xml-libxslt \
perl-xml-parser \
perl-xml-sax \
perl-xml-treepp \
perl-xml-twig \
perl-xml-writer \
pkgconf \
pngquant \
python \
sdl-dev \
tar \
uriparser-dev \
wget \
x264-dev \
x265-dev \
zlib-dev && \
apk add --no-cache \
--repository http://nl.alpinelinux.org/alpine/edge/testing \
gnu-libiconv-dev
RUN \
echo "**** remove musl iconv.h and replace with gnu-iconv.h ****" && \
rm -rf /usr/include/iconv.h && \
cp /usr/include/gnu-libiconv/iconv.h /usr/include/iconv.h
RUN \
echo "**** install perl modules for xmltv ****" && \
curl -L https://cpanmin.us | perl - App::cpanminus && \
cpanm --installdeps /tmp/patches
RUN \
echo "**** compile XMLTV ****" && \
curl -o \
/tmp/xmtltv-src.tar.bz2 -L \
"https://sourceforge.net/projects/xmltv/files/xmltv/${XMLTV_VER}/xmltv-${XMLTV_VER}.tar.bz2" && \
tar xf \
/tmp/xmtltv-src.tar.bz2 -C \
/tmp --strip-components=1 && \
cd "/tmp/xmltv-${XMLTV_VER}" && \
echo "**** Perl 5.26 fixes for XMTLV ****" && \
sed "s/use POSIX 'tmpnam';//" -i filter/tv_to_latex && \
sed "s/use POSIX 'tmpnam';//" -i filter/tv_to_text && \
sed "s/\(lib\/set_share_dir.pl';\)/.\/\1/" -i grab/it/tv_grab_it.PL && \
sed "s/\(filter\/Grep.pm';\)/.\/\1/" -i filter/tv_grep.PL && \
sed "s/\(lib\/XMLTV.pm.in';\)/.\/\1/" -i lib/XMLTV.pm.PL && \
sed "s/\(lib\/Ask\/Term.pm';\)/.\/\1/" -i Makefile.PL && \
PERL5LIB=`pwd` && \
echo -e "yes" | perl Makefile.PL PREFIX=/usr/ INSTALLDIRS=vendor && \
make && \
make test && \
make DESTDIR=/tmp/xmltv-build install
RUN \
echo "**** compile tvheadend ****" && \
if [ -z ${TVHEADEND_COMMIT+x} ]; then \
TVHEADEND_COMMIT=$(curl -sX GET https://api.github.com/repos/tvheadend/tvheadend/commits/master \
| jq -r '. | .sha'); \
fi && \
curl -o \
/tmp/tvheadend.tar.gz -L \
"https://github.com/tvheadend/tvheadend/archive/${TVHEADEND_COMMIT}.tar.gz" && \
mkdir -p \
/tmp/tvheadend && \
tar xf \
/tmp/tvheadend.tar.gz -C \
/tmp/tvheadend/ --strip-components=1 && \
cd /tmp/tvheadend && \
./configure \
`#Encoding` \
--disable-ffmpeg_static \
--disable-libfdkaac_static \
--disable-libtheora_static \
--disable-libopus_static \
--disable-libvorbis_static \
--disable-libvpx_static \
--disable-libx264_static \
--disable-libx265_static \
--disable-libfdkaac \
--enable-libopus \
--enable-libvorbis \
--enable-libvpx \
--enable-libx264 \
--enable-libx265 \
\
`#Options` \
--disable-avahi \
--disable-dbus_1 \
--disable-bintray_cache \
--disable-hdhomerun_static \
--enable-hdhomerun_client \
--enable-libav \
--enable-pngquant \
--enable-trace \
--enable-vaapi \
--infodir=/usr/share/info \
--localstatedir=/var \
--mandir=/usr/share/man \
--prefix=/usr \
--sysconfdir=/config && \
make && \
make DESTDIR=/tmp/tvheadend-build install
RUN \
echo "**** compile argtable2 ****" && \
ARGTABLE_VER1="${ARGTABLE_VER//./-}" && \
mkdir -p \
/tmp/argtable && \
curl -o \
/tmp/argtable-src.tar.gz -L \
"https://sourceforge.net/projects/argtable/files/argtable/argtable-${ARGTABLE_VER}/argtable${ARGTABLE_VER1}.tar.gz" && \
tar xf \
/tmp/argtable-src.tar.gz -C \
/tmp/argtable --strip-components=1 && \
cp /tmp/patches/config.* /tmp/argtable && \
cd /tmp/argtable && \
./configure \
--prefix=/usr && \
make && \
make check && \
make DESTDIR=/tmp/argtable-build install && \
echo "**** copy to /usr for comskip dependency ****" && \
cp -pr /tmp/argtable-build/usr/* /usr/
RUN \
echo "***** compile comskip ****" && \
git clone git://github.com/erikkaashoek/Comskip /tmp/comskip && \
cd /tmp/comskip && \
./autogen.sh && \
./configure \
--bindir=/usr/bin \
--sysconfdir=/config/comskip && \
make && \
make DESTDIR=/tmp/comskip-build install
############## runtime stage ##############
FROM lsiobase/alpine.arm64:3.8
# Add qemu to build on x86_64 systems
COPY qemu-aarch64-static /usr/bin
# set version label
ARG BUILD_DATE
ARG VERSION
LABEL build_version="Linuxserver.io version:- ${VERSION} Build-date:- ${BUILD_DATE}"
LABEL maintainer="saarg"
# environment settings
ENV HOME="/config"
RUN \
echo "**** install runtime packages ****" && \
apk add --no-cache \
bsd-compat-headers \
bzip2 \
curl \
ffmpeg \
ffmpeg-libs \
gzip \
libcrypto1.0 \
libcurl \
libdvbcsa \
libhdhomerun-libs \
libssl1.0 \
libva \
libva-intel-driver \
libvpx \
libxml2 \
libxslt \
linux-headers \
openssl \
opus \
pcre2 \
perl \
perl-archive-zip \
perl-boolean \
perl-capture-tiny \
perl-cgi \
perl-compress-raw-zlib \
perl-data-dumper \
perl-date-manip \
perl-datetime \
perl-datetime-format-strptime \
perl-datetime-timezone \
perl-dbd-sqlite \
perl-dbi \
perl-digest-sha1 \
perl-doc \
perl-file-slurp \
perl-file-temp \
perl-file-which \
perl-getopt-long \
perl-html-parser \
perl-html-tree \
perl-http-cookies \
perl-io \
perl-io-compress \
perl-io-html \
perl-io-socket-ssl \
perl-io-stringy \
perl-json \
perl-libwww \
perl-lingua-en-numbers-ordinate \
perl-lingua-preferred \
perl-list-moreutils \
perl-module-build \
perl-module-pluggable \
perl-net-ssleay \
perl-parse-recdescent \
perl-path-class \
perl-scalar-list-utils \
perl-term-progressbar \
perl-term-readkey \
perl-test-exception \
perl-test-requires \
perl-timedate \
perl-try-tiny \
perl-unicode-string \
perl-xml-libxml \
perl-xml-libxslt \
perl-xml-parser \
perl-xml-sax \
perl-xml-treepp \
perl-xml-twig \
perl-xml-writer \
python \
tar \
uriparser \
wget \
x264 \
x265 \
zlib && \
apk add --no-cache \
--repository http://nl.alpinelinux.org/alpine/edge/testing \
gnu-libiconv
# copy local files and buildstage artifacts
COPY --from=buildstage /tmp/argtable-build/usr/ /usr/
COPY --from=buildstage /tmp/comskip-build/usr/ /usr/
COPY --from=buildstage /tmp/tvheadend-build/usr/ /usr/
COPY --from=buildstage /tmp/xmltv-build/usr/ /usr/
COPY --from=buildstage /usr/local/share/man/ /usr/local/share/man/
COPY --from=buildstage /usr/local/share/perl5/ /usr/local/share/perl5/
COPY root/ /
# add picons
ADD picons.tar.bz2 /picons
# ports and volumes
EXPOSE 9981 9982
VOLUME /config /recordings

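The `COPY qemu-aarch64-static /usr/bin` line in the runtime stage is what allows this arm64 image to be assembled on an x86_64 host; the Jenkinsfile below downloads the static binary into the build context before calling docker build. A rough sketch of reproducing that locally, assuming qemu has been registered with binfmt_misc (the multiarch/qemu-user-static registration image shown here is one common way to do that, not something this repo ships):

```
# Fetch the static qemu binary the Dockerfile expects in the build context
curl https://lsio-ci.ams3.digitaloceanspaces.com/qemu-aarch64-static -o qemu-aarch64-static
chmod +x qemu-aarch64-static

# One common way to register qemu with binfmt_misc on an x86_64 host (assumption)
docker run --rm --privileged multiarch/qemu-user-static:register --reset

# Build the arm64 variant with the same build arguments the pipeline passes
docker build --no-cache -f Dockerfile.aarch64 \
  --build-arg TVHEADEND_COMMIT=<commit-sha> \
  --build-arg VERSION=local --build-arg BUILD_DATE=local \
  -t tvheadend:arm64v8-local .
```

The same approach applies to Dockerfile.armhf with qemu-arm-static.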
Dockerfile.armhf Normal file

@@ -0,0 +1,344 @@
FROM lsiobase/alpine.armhf:3.8 as buildstage
############## build stage ##############
# package versions
ARG ARGTABLE_VER="2.13"
ARG XMLTV_VER="0.5.69"
# environment settings
ARG TZ="Europe/Oslo"
ARG TVHEADEND_COMMIT
ENV HOME="/config"
# copy patches
COPY patches/ /tmp/patches/
RUN \
echo "**** install build packages ****" && \
apk add --no-cache \
autoconf \
automake \
bsd-compat-headers \
bzip2 \
cmake \
curl \
ffmpeg-dev \
file \
findutils \
g++ \
gcc \
gettext-dev \
git \
gzip \
jq \
libcurl \
libdvbcsa-dev \
libgcrypt-dev \
libhdhomerun-dev \
libtool \
libva-dev \
libvpx-dev \
libxml2-dev \
libxslt-dev \
linux-headers \
make \
openssl-dev \
opus-dev \
patch \
pcre2-dev \
perl-archive-zip \
perl-boolean \
perl-capture-tiny \
perl-cgi \
perl-compress-raw-zlib \
perl-data-dumper \
perl-date-manip \
perl-datetime \
perl-datetime-format-strptime \
perl-datetime-timezone \
perl-dbd-sqlite \
perl-dbi \
perl-dev \
perl-digest-sha1 \
perl-doc \
perl-file-slurp \
perl-file-temp \
perl-file-which \
perl-getopt-long \
perl-html-parser \
perl-html-tree \
perl-http-cookies \
perl-io \
perl-io-compress \
perl-io-html \
perl-io-socket-ssl \
perl-io-stringy \
perl-json \
perl-libwww \
perl-lingua-en-numbers-ordinate \
perl-lingua-preferred \
perl-list-moreutils \
perl-module-build \
perl-module-pluggable \
perl-net-ssleay \
perl-parse-recdescent \
perl-path-class \
perl-scalar-list-utils \
perl-term-progressbar \
perl-term-readkey \
perl-test-exception \
perl-test-requires \
perl-timedate \
perl-try-tiny \
perl-unicode-string \
perl-xml-libxml \
perl-xml-libxslt \
perl-xml-parser \
perl-xml-sax \
perl-xml-treepp \
perl-xml-twig \
perl-xml-writer \
pkgconf \
pngquant \
python \
sdl-dev \
tar \
uriparser-dev \
wget \
x264-dev \
x265-dev \
zlib-dev && \
apk add --no-cache \
--repository http://nl.alpinelinux.org/alpine/edge/testing \
gnu-libiconv-dev
RUN \
echo "**** remove musl iconv.h and replace with gnu-iconv.h ****" && \
rm -rf /usr/include/iconv.h && \
cp /usr/include/gnu-libiconv/iconv.h /usr/include/iconv.h
RUN \
echo "**** install perl modules for xmltv ****" && \
curl -L https://cpanmin.us | perl - App::cpanminus && \
cpanm --installdeps /tmp/patches
RUN \
echo "**** compile XMLTV ****" && \
curl -o \
/tmp/xmtltv-src.tar.bz2 -L \
"https://sourceforge.net/projects/xmltv/files/xmltv/${XMLTV_VER}/xmltv-${XMLTV_VER}.tar.bz2" && \
tar xf \
/tmp/xmtltv-src.tar.bz2 -C \
/tmp --strip-components=1 && \
cd "/tmp/xmltv-${XMLTV_VER}" && \
echo "**** Perl 5.26 fixes for XMTLV ****" && \
sed "s/use POSIX 'tmpnam';//" -i filter/tv_to_latex && \
sed "s/use POSIX 'tmpnam';//" -i filter/tv_to_text && \
sed "s/\(lib\/set_share_dir.pl';\)/.\/\1/" -i grab/it/tv_grab_it.PL && \
sed "s/\(filter\/Grep.pm';\)/.\/\1/" -i filter/tv_grep.PL && \
sed "s/\(lib\/XMLTV.pm.in';\)/.\/\1/" -i lib/XMLTV.pm.PL && \
sed "s/\(lib\/Ask\/Term.pm';\)/.\/\1/" -i Makefile.PL && \
PERL5LIB=`pwd` && \
echo -e "yes" | perl Makefile.PL PREFIX=/usr/ INSTALLDIRS=vendor && \
make && \
make test && \
make DESTDIR=/tmp/xmltv-build install
RUN \
echo "**** compile tvheadend ****" && \
if [ -z ${TVHEADEND_COMMIT+x} ]; then \
TVHEADEND_COMMIT=$(curl -sX GET https://api.github.com/repos/tvheadend/tvheadend/commits/master \
| jq -r '. | .sha'); \
fi && \
curl -o \
/tmp/tvheadend.tar.gz -L \
"https://github.com/tvheadend/tvheadend/archive/${TVHEADEND_COMMIT}.tar.gz" && \
mkdir -p \
/tmp/tvheadend && \
tar xf \
/tmp/tvheadend.tar.gz -C \
/tmp/tvheadend/ --strip-components=1 && \
cd /tmp/tvheadend && \
./configure \
`#Encoding` \
--disable-ffmpeg_static \
--disable-libfdkaac_static \
--disable-libtheora_static \
--disable-libopus_static \
--disable-libvorbis_static \
--disable-libvpx_static \
--disable-libx264_static \
--disable-libx265_static \
--disable-libfdkaac \
--enable-libopus \
--enable-libvorbis \
--enable-libvpx \
--enable-libx264 \
--enable-libx265 \
\
`#Options` \
--disable-avahi \
--disable-dbus_1 \
--disable-bintray_cache \
--disable-hdhomerun_static \
--enable-hdhomerun_client \
--enable-libav \
--enable-pngquant \
--enable-trace \
--enable-vaapi \
--infodir=/usr/share/info \
--localstatedir=/var \
--mandir=/usr/share/man \
--prefix=/usr \
--sysconfdir=/config && \
make && \
make DESTDIR=/tmp/tvheadend-build install
RUN \
echo "**** compile argtable2 ****" && \
ARGTABLE_VER1="${ARGTABLE_VER//./-}" && \
mkdir -p \
/tmp/argtable && \
curl -o \
/tmp/argtable-src.tar.gz -L \
"https://sourceforge.net/projects/argtable/files/argtable/argtable-${ARGTABLE_VER}/argtable${ARGTABLE_VER1}.tar.gz" && \
tar xf \
/tmp/argtable-src.tar.gz -C \
/tmp/argtable --strip-components=1 && \
cp /tmp/patches/config.* /tmp/argtable && \
cd /tmp/argtable && \
./configure \
--prefix=/usr && \
make && \
make check && \
make DESTDIR=/tmp/argtable-build install && \
echo "**** copy to /usr for comskip dependency ****" && \
cp -pr /tmp/argtable-build/usr/* /usr/
RUN \
echo "***** compile comskip ****" && \
git clone git://github.com/erikkaashoek/Comskip /tmp/comskip && \
cd /tmp/comskip && \
./autogen.sh && \
./configure \
--bindir=/usr/bin \
--sysconfdir=/config/comskip && \
make && \
make DESTDIR=/tmp/comskip-build install
############## runtime stage ##############
FROM lsiobase/alpine.armhf:3.8
# Add qemu to build on x86_64 systems
COPY qemu-arm-static /usr/bin
# set version label
ARG BUILD_DATE
ARG VERSION
LABEL build_version="Linuxserver.io version:- ${VERSION} Build-date:- ${BUILD_DATE}"
LABEL maintainer="saarg"
# environment settings
ENV HOME="/config"
RUN \
echo "**** install runtime packages ****" && \
apk add --no-cache \
bsd-compat-headers \
bzip2 \
curl \
ffmpeg \
ffmpeg-libs \
gzip \
libcrypto1.0 \
libcurl \
libdvbcsa \
libhdhomerun-libs \
libssl1.0 \
libva \
libva-intel-driver \
libvpx \
libxml2 \
libxslt \
linux-headers \
openssl \
opus \
pcre2 \
perl \
perl-archive-zip \
perl-boolean \
perl-capture-tiny \
perl-cgi \
perl-compress-raw-zlib \
perl-data-dumper \
perl-date-manip \
perl-datetime \
perl-datetime-format-strptime \
perl-datetime-timezone \
perl-dbd-sqlite \
perl-dbi \
perl-digest-sha1 \
perl-doc \
perl-file-slurp \
perl-file-temp \
perl-file-which \
perl-getopt-long \
perl-html-parser \
perl-html-tree \
perl-http-cookies \
perl-io \
perl-io-compress \
perl-io-html \
perl-io-socket-ssl \
perl-io-stringy \
perl-json \
perl-libwww \
perl-lingua-en-numbers-ordinate \
perl-lingua-preferred \
perl-list-moreutils \
perl-module-build \
perl-module-pluggable \
perl-net-ssleay \
perl-parse-recdescent \
perl-path-class \
perl-scalar-list-utils \
perl-term-progressbar \
perl-term-readkey \
perl-test-exception \
perl-test-requires \
perl-timedate \
perl-try-tiny \
perl-unicode-string \
perl-xml-libxml \
perl-xml-libxslt \
perl-xml-parser \
perl-xml-sax \
perl-xml-treepp \
perl-xml-twig \
perl-xml-writer \
python \
tar \
uriparser \
wget \
x264 \
x265 \
zlib && \
apk add --no-cache \
--repository http://nl.alpinelinux.org/alpine/edge/testing \
gnu-libiconv
# copy local files and buildstage artifacts
COPY --from=buildstage /tmp/argtable-build/usr/ /usr/
COPY --from=buildstage /tmp/comskip-build/usr/ /usr/
COPY --from=buildstage /tmp/tvheadend-build/usr/ /usr/
COPY --from=buildstage /tmp/xmltv-build/usr/ /usr/
COPY --from=buildstage /usr/local/share/man/ /usr/local/share/man/
COPY --from=buildstage /usr/local/share/perl5/ /usr/local/share/perl5/
COPY root/ /
# add picons
ADD picons.tar.bz2 /picons
# ports and volumes
EXPOSE 9981 9982
VOLUME /config /recordings

Jenkinsfile vendored Normal file

@@ -0,0 +1,603 @@
pipeline {
agent {
label 'X86-64-MULTI'
}
// Input to determine if this is a package check
parameters {
string(defaultValue: 'false', description: 'package check run', name: 'PACKAGE_CHECK')
}
// Configuration for the variables used for this specific repo
environment {
BUILDS_DISCORD=credentials('build_webhook_url')
GITHUB_TOKEN=credentials('498b4638-2d02-4ce5-832d-8a57d01d97ab')
EXT_GIT_BRANCH = 'master'
EXT_USER = 'tvheadend'
EXT_REPO = 'tvheadend'
BUILD_VERSION_ARG = 'TVHEADEND_COMMIT'
LS_USER = 'linuxserver'
LS_REPO = 'docker-tvheadend'
CONTAINER_NAME = 'tvheadend'
DOCKERHUB_IMAGE = 'linuxserver/tvheadend'
DEV_DOCKERHUB_IMAGE = 'lsiodev/tvheadend'
PR_DOCKERHUB_IMAGE = 'lspipepr/tvheadend'
DIST_IMAGE = 'alpine'
MULTIARCH='true'
CI='true'
CI_WEB='true'
CI_PORT='9981'
CI_SSL='false'
CI_DELAY='120'
CI_DOCKERENV='TZ=US/Pacific'
CI_AUTH='user:password'
CI_WEBPATH=''
}
stages {
// Setup all the basic environment variables needed for the build
stage("Set ENV Variables base"){
steps{
script{
env.EXIT_STATUS = ''
env.LS_RELEASE = sh(
script: '''curl -s https://api.github.com/repos/${LS_USER}/${LS_REPO}/releases/latest | jq -r '. | .tag_name' ''',
returnStdout: true).trim()
env.LS_RELEASE_NOTES = sh(
script: '''git log -1 --pretty=%B | sed -E ':a;N;$!ba;s/\\r{0,1}\\n/\\\\n/g' ''',
returnStdout: true).trim()
env.GITHUB_DATE = sh(
script: '''date '+%Y-%m-%dT%H:%M:%S%:z' ''',
returnStdout: true).trim()
env.COMMIT_SHA = sh(
script: '''git rev-parse HEAD''',
returnStdout: true).trim()
env.CODE_URL = 'https://github.com/' + env.LS_USER + '/' + env.LS_REPO + '/commit/' + env.GIT_COMMIT
env.DOCKERHUB_LINK = 'https://hub.docker.com/r/' + env.DOCKERHUB_IMAGE + '/tags/'
env.PULL_REQUEST = env.CHANGE_ID
}
script{
env.LS_RELEASE_NUMBER = sh(
script: '''echo ${LS_RELEASE} |sed 's/^.*-ls//g' ''',
returnStdout: true).trim()
}
script{
env.LS_TAG_NUMBER = sh(
script: '''#! /bin/bash
tagsha=$(git rev-list -n 1 ${LS_RELEASE} 2>/dev/null)
if [ "${tagsha}" == "${COMMIT_SHA}" ]; then
echo ${LS_RELEASE_NUMBER}
elif [ -z "${GIT_COMMIT}" ]; then
echo ${LS_RELEASE_NUMBER}
else
echo $((${LS_RELEASE_NUMBER} + 1))
fi''',
returnStdout: true).trim()
}
}
}
/* #######################
Package Version Tagging
####################### */
// Grab the current package versions in Git to determine package tag
stage("Set Package tag"){
steps{
script{
env.PACKAGE_TAG = sh(
script: '''#!/bin/bash
if [ -e package_versions.txt ] ; then
cat package_versions.txt | md5sum | cut -c1-8
else
echo none
fi''',
returnStdout: true).trim()
}
}
}
/* ########################
External Release Tagging
######################## */
// If this is a github commit trigger determine the current commit at head
stage("Set ENV github_commit"){
steps{
script{
env.EXT_RELEASE = sh(
script: '''curl -s https://api.github.com/repos/${EXT_USER}/${EXT_REPO}/commits/${EXT_GIT_BRANCH} | jq -r '. | .sha' | cut -c1-8 ''',
returnStdout: true).trim()
}
}
}
// If this is a github commit trigger Set the external release link
stage("Set ENV commit_link"){
steps{
script{
env.RELEASE_LINK = 'https://github.com/' + env.EXT_USER + '/' + env.EXT_REPO + '/commit/' + env.EXT_RELEASE
}
}
}
// Sanitize the release tag and strip illegal docker or github characters
stage("Sanitize tag"){
steps{
script{
env.EXT_RELEASE_CLEAN = sh(
script: '''echo ${EXT_RELEASE} | sed 's/[~,%@+;:/]//g' ''',
returnStdout: true).trim()
}
}
}
// If this is a master build use live docker endpoints
stage("Set ENV live build"){
when {
branch "master"
environment name: 'CHANGE_ID', value: ''
}
steps {
script{
env.IMAGE = env.DOCKERHUB_IMAGE
if (env.MULTIARCH == 'true') {
env.CI_TAGS = 'amd64-' + env.EXT_RELEASE_CLEAN + '-ls' + env.LS_TAG_NUMBER + '|arm32v6-' + env.EXT_RELEASE_CLEAN + '-ls' + env.LS_TAG_NUMBER + '|arm64v8-' + env.EXT_RELEASE_CLEAN + '-ls' + env.LS_TAG_NUMBER
} else {
env.CI_TAGS = env.EXT_RELEASE_CLEAN + '-ls' + env.LS_TAG_NUMBER
}
env.META_TAG = env.EXT_RELEASE_CLEAN + '-ls' + env.LS_TAG_NUMBER
}
}
}
// If this is a dev build use dev docker endpoints
stage("Set ENV dev build"){
when {
not {branch "master"}
environment name: 'CHANGE_ID', value: ''
}
steps {
script{
env.IMAGE = env.DEV_DOCKERHUB_IMAGE
if (env.MULTIARCH == 'true') {
env.CI_TAGS = 'amd64-' + env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA + '|arm32v6-' + env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA + '|arm64v8-' + env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA
} else {
env.CI_TAGS = env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA
}
env.META_TAG = env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA
env.DOCKERHUB_LINK = 'https://hub.docker.com/r/' + env.DEV_DOCKERHUB_IMAGE + '/tags/'
}
}
}
// If this is a pull request build use dev docker endpoints
stage("Set ENV PR build"){
when {
not {environment name: 'CHANGE_ID', value: ''}
}
steps {
script{
env.IMAGE = env.PR_DOCKERHUB_IMAGE
if (env.MULTIARCH == 'true') {
env.CI_TAGS = 'amd64-' + env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-pr-' + env.PULL_REQUEST + '|arm32v6-' + env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-pr-' + env.PULL_REQUEST + '|arm64v8-' + env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-pr-' + env.PULL_REQUEST
} else {
env.CI_TAGS = env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-pr-' + env.PULL_REQUEST
}
env.META_TAG = env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-pr-' + env.PULL_REQUEST
env.CODE_URL = 'https://github.com/' + env.LS_USER + '/' + env.LS_REPO + '/pull/' + env.PULL_REQUEST
env.DOCKERHUB_LINK = 'https://hub.docker.com/r/' + env.PR_DOCKERHUB_IMAGE + '/tags/'
}
}
}
// Use helper containers to render templated files
stage('Update-Templates') {
when {
branch "master"
environment name: 'CHANGE_ID', value: ''
expression {
env.CONTAINER_NAME != null
}
}
steps {
sh '''#! /bin/bash
set -e
TEMPDIR=$(mktemp -d)
docker pull linuxserver/jenkins-builder:latest
docker run --rm -e CONTAINER_NAME=${CONTAINER_NAME} -e GITHUB_BRANCH=master -v ${TEMPDIR}:/ansible/jenkins linuxserver/jenkins-builder:latest
docker pull linuxserver/doc-builder:latest
docker run --rm -e CONTAINER_NAME=${CONTAINER_NAME} -e GITHUB_BRANCH=master -v ${TEMPDIR}:/ansible/readme linuxserver/doc-builder:latest
if [ "$(md5sum ${TEMPDIR}/${LS_REPO}/Jenkinsfile | awk '{ print $1 }')" != "$(md5sum Jenkinsfile | awk '{ print $1 }')" ] || [ "$(md5sum ${TEMPDIR}/${CONTAINER_NAME}/README.md | awk '{ print $1 }')" != "$(md5sum README.md | awk '{ print $1 }')" ]; then
mkdir -p ${TEMPDIR}/repo
git clone https://github.com/${LS_USER}/${LS_REPO}.git ${TEMPDIR}/repo/${LS_REPO}
git --git-dir ${TEMPDIR}/repo/${LS_REPO}/.git checkout -f master
cp ${TEMPDIR}/${CONTAINER_NAME}/README.md ${TEMPDIR}/repo/${LS_REPO}/
cp ${TEMPDIR}/docker-${CONTAINER_NAME}/Jenkinsfile ${TEMPDIR}/repo/${LS_REPO}/
cd ${TEMPDIR}/repo/${LS_REPO}/
git --git-dir ${TEMPDIR}/repo/${LS_REPO}/.git add Jenkinsfile README.md
git --git-dir ${TEMPDIR}/repo/${LS_REPO}/.git commit -m 'Bot Updating Templated Files'
git --git-dir ${TEMPDIR}/repo/${LS_REPO}/.git push https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/${LS_USER}/${LS_REPO}.git --all
echo "true" > /tmp/${COMMIT_SHA}-${BUILD_NUMBER}
else
echo "false" > /tmp/${COMMIT_SHA}-${BUILD_NUMBER}
fi
rm -Rf ${TEMPDIR}'''
script{
env.FILES_UPDATED = sh(
script: '''cat /tmp/${COMMIT_SHA}-${BUILD_NUMBER}''',
returnStdout: true).trim()
}
}
}
// Exit the build if the Templated files were just updated
stage('Template-exit') {
when {
branch "master"
environment name: 'CHANGE_ID', value: ''
environment name: 'FILES_UPDATED', value: 'true'
expression {
env.CONTAINER_NAME != null
}
}
steps {
script{
env.EXIT_STATUS = 'ABORTED'
}
}
}
/* ###############
Build Container
############### */
// Build Docker container for push to LS Repo
stage('Build-Single') {
when {
environment name: 'MULTIARCH', value: 'false'
environment name: 'EXIT_STATUS', value: ''
}
steps {
sh "docker build --no-cache -t ${IMAGE}:${META_TAG} \
--build-arg ${BUILD_VERSION_ARG}=${EXT_RELEASE} --build-arg VERSION=\"${META_TAG}\" --build-arg BUILD_DATE=${GITHUB_DATE} ."
}
}
// Build MultiArch Docker containers for push to LS Repo
stage('Build-Multi') {
when {
environment name: 'MULTIARCH', value: 'true'
environment name: 'EXIT_STATUS', value: ''
}
parallel {
stage('Build X86') {
steps {
sh "docker build --no-cache -t ${IMAGE}:amd64-${META_TAG} \
--build-arg ${BUILD_VERSION_ARG}=${EXT_RELEASE} --build-arg VERSION=\"${META_TAG}\" --build-arg BUILD_DATE=${GITHUB_DATE} ."
}
}
stage('Build ARMHF') {
agent {
label 'ARMHF'
}
steps {
withCredentials([
[
$class: 'UsernamePasswordMultiBinding',
credentialsId: '3f9ba4d5-100d-45b0-a3c4-633fd6061207',
usernameVariable: 'DOCKERUSER',
passwordVariable: 'DOCKERPASS'
]
]) {
echo 'Logging into DockerHub'
sh '''#! /bin/bash
echo $DOCKERPASS | docker login -u $DOCKERUSER --password-stdin
'''
sh "curl https://lsio-ci.ams3.digitaloceanspaces.com/qemu-arm-static -o qemu-arm-static"
sh "chmod +x qemu-*"
sh "docker build --no-cache -f Dockerfile.armhf -t ${IMAGE}:arm32v6-${META_TAG} \
--build-arg ${BUILD_VERSION_ARG}=${EXT_RELEASE} --build-arg VERSION=\"${META_TAG}\" --build-arg BUILD_DATE=${GITHUB_DATE} ."
sh "docker tag ${IMAGE}:arm32v6-${META_TAG} lsiodev/buildcache:arm32v6-${COMMIT_SHA}-${BUILD_NUMBER}"
sh "docker push lsiodev/buildcache:arm32v6-${COMMIT_SHA}-${BUILD_NUMBER}"
}
}
}
stage('Build ARM64') {
agent {
label 'ARM64'
}
steps {
withCredentials([
[
$class: 'UsernamePasswordMultiBinding',
credentialsId: '3f9ba4d5-100d-45b0-a3c4-633fd6061207',
usernameVariable: 'DOCKERUSER',
passwordVariable: 'DOCKERPASS'
]
]) {
echo 'Logging into DockerHub'
sh '''#! /bin/bash
echo $DOCKERPASS | docker login -u $DOCKERUSER --password-stdin
'''
sh "curl https://lsio-ci.ams3.digitaloceanspaces.com/qemu-aarch64-static -o qemu-aarch64-static"
sh "chmod +x qemu-*"
sh "docker build --no-cache -f Dockerfile.aarch64 -t ${IMAGE}:arm64v8-${META_TAG} \
--build-arg ${BUILD_VERSION_ARG}=${EXT_RELEASE} --build-arg VERSION=\"${META_TAG}\" --build-arg BUILD_DATE=${GITHUB_DATE} ."
sh "docker tag ${IMAGE}:arm64v8-${META_TAG} lsiodev/buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER}"
sh "docker push lsiodev/buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER}"
}
}
}
}
}
// Take the image we just built and dump package versions for comparison
stage('Update-packages') {
when {
branch "master"
environment name: 'CHANGE_ID', value: ''
environment name: 'EXIT_STATUS', value: ''
}
steps {
sh '''#! /bin/bash
set -e
TEMPDIR=$(mktemp -d)
if [ "${MULTIARCH}" == "true" ]; then
LOCAL_CONTAINER=${IMAGE}:amd64-${META_TAG}
else
LOCAL_CONTAINER=${IMAGE}:${META_TAG}
fi
if [ "${DIST_IMAGE}" == "alpine" ]; then
docker run --rm --entrypoint '/bin/sh' -v ${TEMPDIR}:/tmp ${LOCAL_CONTAINER} -c '\
apk info > packages && \
apk info -v > versions && \
paste -d " " packages versions > /tmp/package_versions.txt && \
chmod 777 /tmp/package_versions.txt'
elif [ "${DIST_IMAGE}" == "ubuntu" ]; then
docker run --rm --entrypoint '/bin/sh' -v ${TEMPDIR}:/tmp ${LOCAL_CONTAINER} -c '\
apt list -qq --installed > /tmp/package_versions.txt && \
chmod 777 /tmp/package_versions.txt'
fi
NEW_PACKAGE_TAG=$(md5sum ${TEMPDIR}/package_versions.txt | cut -c1-8 )
echo "Package tag sha from current packages in buit container is ${NEW_PACKAGE_TAG} comparing to old ${PACKAGE_TAG} from github"
if [ "${NEW_PACKAGE_TAG}" != "${PACKAGE_TAG}" ]; then
git clone https://github.com/${LS_USER}/${LS_REPO}.git ${TEMPDIR}/${LS_REPO}
git --git-dir ${TEMPDIR}/${LS_REPO}/.git checkout -f master
cp ${TEMPDIR}/package_versions.txt ${TEMPDIR}/${LS_REPO}/
cd ${TEMPDIR}/${LS_REPO}/
wait
git add package_versions.txt
git commit -m 'Bot Updating Package Versions'
git push https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/${LS_USER}/${LS_REPO}.git --all
echo "true" > /tmp/packages-${COMMIT_SHA}-${BUILD_NUMBER}
echo "Package tag updated, stopping build process"
else
echo "false" > /tmp/packages-${COMMIT_SHA}-${BUILD_NUMBER}
echo "Package tag is same as previous continue with build process"
fi
rm -Rf ${TEMPDIR}'''
script{
env.PACKAGE_UPDATED = sh(
script: '''cat /tmp/packages-${COMMIT_SHA}-${BUILD_NUMBER}''',
returnStdout: true).trim()
}
}
}
// Exit the build if the package file was just updated
stage('PACKAGE-exit') {
when {
branch "master"
environment name: 'CHANGE_ID', value: ''
environment name: 'PACKAGE_UPDATED', value: 'true'
environment name: 'EXIT_STATUS', value: ''
}
steps {
script{
env.EXIT_STATUS = 'ABORTED'
}
}
}
// Exit the build if this is just a package check and there are no changes to push
stage('PACKAGECHECK-exit') {
when {
branch "master"
environment name: 'CHANGE_ID', value: ''
environment name: 'PACKAGE_UPDATED', value: 'false'
environment name: 'EXIT_STATUS', value: ''
expression {
params.PACKAGE_CHECK == 'true'
}
}
steps {
script{
env.EXIT_STATUS = 'ABORTED'
}
}
}
/* #######
Testing
####### */
// Run Container tests
stage('Test') {
when {
environment name: 'CI', value: 'true'
environment name: 'EXIT_STATUS', value: ''
}
steps {
withCredentials([
string(credentialsId: 'spaces-key', variable: 'DO_KEY'),
string(credentialsId: 'spaces-secret', variable: 'DO_SECRET')
]) {
script{
env.CI_URL = 'https://lsio-ci.ams3.digitaloceanspaces.com/' + env.IMAGE + '/' + env.META_TAG + '/index.html'
}
sh '''#! /bin/bash
set -e
docker pull lsiodev/ci:latest
if [ "${MULTIARCH}" == "true" ]; then
docker pull lsiodev/buildcache:arm32v6-${COMMIT_SHA}-${BUILD_NUMBER}
docker pull lsiodev/buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER}
docker tag lsiodev/buildcache:arm32v6-${COMMIT_SHA}-${BUILD_NUMBER} ${IMAGE}:arm32v6-${META_TAG}
docker tag lsiodev/buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} ${IMAGE}:arm64v8-${META_TAG}
fi
docker run --rm \
-v /var/run/docker.sock:/var/run/docker.sock \
-e IMAGE=\"${IMAGE}\" \
-e DELAY_START=\"${CI_DELAY}\" \
-e TAGS=\"${CI_TAGS}\" \
-e META_TAG=\"${META_TAG}\" \
-e PORT=\"${CI_PORT}\" \
-e SSL=\"${CI_SSL}\" \
-e BASE=\"${DIST_IMAGE}\" \
-e SECRET_KEY=\"${DO_SECRET}\" \
-e ACCESS_KEY=\"${DO_KEY}\" \
-e DOCKER_ENV=\"${CI_DOCKERENV}\" \
-e WEB_SCREENSHOT=\"${CI_WEB}\" \
-e WEB_AUTH=\"${CI_AUTH}\" \
-e WEB_PATH=\"${CI_WEBPATH}\" \
-e DO_REGION="ams3" \
-e DO_BUCKET="lsio-ci" \
-t lsiodev/ci:latest \
python /ci/ci.py'''
}
}
}
/* ##################
Release Logic
################## */
// If this is an amd64 only image only push a single image
stage('Docker-Push-Single') {
when {
environment name: 'MULTIARCH', value: 'false'
environment name: 'EXIT_STATUS', value: ''
}
steps {
withCredentials([
[
$class: 'UsernamePasswordMultiBinding',
credentialsId: '3f9ba4d5-100d-45b0-a3c4-633fd6061207',
usernameVariable: 'DOCKERUSER',
passwordVariable: 'DOCKERPASS'
]
]) {
echo 'Logging into DockerHub'
sh '''#! /bin/bash
echo $DOCKERPASS | docker login -u $DOCKERUSER --password-stdin
'''
sh "docker tag ${IMAGE}:${META_TAG} ${IMAGE}:latest"
sh "docker push ${IMAGE}:latest"
sh "docker push ${IMAGE}:${META_TAG}"
}
}
}
// If this is a multi arch release push all images and define the manifest
stage('Docker-Push-Multi') {
when {
environment name: 'MULTIARCH', value: 'true'
environment name: 'EXIT_STATUS', value: ''
}
steps {
withCredentials([
[
$class: 'UsernamePasswordMultiBinding',
credentialsId: '3f9ba4d5-100d-45b0-a3c4-633fd6061207',
usernameVariable: 'DOCKERUSER',
passwordVariable: 'DOCKERPASS'
]
]) {
sh '''#! /bin/bash
echo $DOCKERPASS | docker login -u $DOCKERUSER --password-stdin
'''
sh '''#! /bin/bash
if [ "${CI}" == "false" ]; then
docker pull lsiodev/buildcache:arm32v6-${COMMIT_SHA}-${BUILD_NUMBER}
docker pull lsiodev/buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER}
docker tag lsiodev/buildcache:arm32v6-${COMMIT_SHA}-${BUILD_NUMBER} ${IMAGE}:arm32v6-${META_TAG}
docker tag lsiodev/buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} ${IMAGE}:arm64v8-${META_TAG}
fi'''
sh "docker tag ${IMAGE}:amd64-${META_TAG} ${IMAGE}:amd64-latest"
sh "docker tag ${IMAGE}:arm32v6-${META_TAG} ${IMAGE}:arm32v6-latest"
sh "docker tag ${IMAGE}:arm64v8-${META_TAG} ${IMAGE}:arm64v8-latest"
sh "docker push ${IMAGE}:amd64-${META_TAG}"
sh "docker push ${IMAGE}:arm32v6-${META_TAG}"
sh "docker push ${IMAGE}:arm64v8-${META_TAG}"
sh "docker push ${IMAGE}:amd64-latest"
sh "docker push ${IMAGE}:arm32v6-latest"
sh "docker push ${IMAGE}:arm64v8-latest"
sh "docker manifest push --purge ${IMAGE}:latest || :"
sh "docker manifest create ${IMAGE}:latest ${IMAGE}:amd64-latest ${IMAGE}:arm32v6-latest ${IMAGE}:arm64v8-latest"
sh "docker manifest annotate ${IMAGE}:latest ${IMAGE}:arm32v6-latest --os linux --arch arm"
sh "docker manifest annotate ${IMAGE}:latest ${IMAGE}:arm64v8-latest --os linux --arch arm64 --variant v8"
sh "docker manifest push --purge ${IMAGE}:${META_TAG} || :"
sh "docker manifest create ${IMAGE}:${META_TAG} ${IMAGE}:amd64-${META_TAG} ${IMAGE}:arm32v6-${META_TAG} ${IMAGE}:arm64v8-${META_TAG}"
sh "docker manifest annotate ${IMAGE}:${META_TAG} ${IMAGE}:arm32v6-${META_TAG} --os linux --arch arm"
sh "docker manifest annotate ${IMAGE}:${META_TAG} ${IMAGE}:arm64v8-${META_TAG} --os linux --arch arm64 --variant v8"
sh "docker manifest push --purge ${IMAGE}:latest"
sh "docker manifest push --purge ${IMAGE}:${META_TAG}"
}
}
}
// If this is a public release tag it in the LS Github
stage('Github-Tag-Push-Release') {
when {
branch "master"
expression {
env.LS_RELEASE != env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-ls' + env.LS_TAG_NUMBER
}
environment name: 'CHANGE_ID', value: ''
environment name: 'EXIT_STATUS', value: ''
}
steps {
echo "Pushing New tag for current commit ${EXT_RELEASE_CLEAN}-pkg-${PACKAGE_TAG}-ls${LS_TAG_NUMBER}"
sh '''curl -H "Authorization: token ${GITHUB_TOKEN}" -X POST https://api.github.com/repos/${LS_USER}/${LS_REPO}/git/tags \
-d '{"tag":"'${EXT_RELEASE_CLEAN}'-pkg-'${PACKAGE_TAG}'-ls'${LS_TAG_NUMBER}'",\
"object": "'${COMMIT_SHA}'",\
"message": "Tagging Release '${EXT_RELEASE_CLEAN}'-pkg-'${PACKAGE_TAG}'-ls'${LS_TAG_NUMBER}' to master",\
"type": "commit",\
"tagger": {"name": "LinuxServer Jenkins","email": "jenkins@linuxserver.io","date": "'${GITHUB_DATE}'"}}' '''
echo "Pushing New release for Tag"
sh '''#! /bin/bash
curl -s https://api.github.com/repos/${EXT_USER}/${EXT_REPO}/commits/${EXT_GIT_BRANCH} | jq '. | .commit.message' | sed 's:^.\\(.*\\).$:\\1:' > releasebody.json
echo '{"tag_name":"'${EXT_RELEASE_CLEAN}'-pkg-'${PACKAGE_TAG}'-ls'${LS_TAG_NUMBER}'",\
"target_commitish": "master",\
"name": "'${EXT_RELEASE_CLEAN}'-pkg-'${PACKAGE_TAG}'-ls'${LS_TAG_NUMBER}'",\
"body": "**LinuxServer Changes:**\\n\\n'${LS_RELEASE_NOTES}'\\n**'${EXT_REPO}' Changes:**\\n\\n' > start
printf '","draft": false,"prerelease": false}' >> releasebody.json
paste -d'\\0' start releasebody.json > releasebody.json.done
curl -H "Authorization: token ${GITHUB_TOKEN}" -X POST https://api.github.com/repos/${LS_USER}/${LS_REPO}/releases -d @releasebody.json.done'''
}
}
// Use helper container to sync the current README on master to the dockerhub endpoint
stage('Sync-README') {
when {
environment name: 'CHANGE_ID', value: ''
environment name: 'EXIT_STATUS', value: ''
}
steps {
withCredentials([
[
$class: 'UsernamePasswordMultiBinding',
credentialsId: '3f9ba4d5-100d-45b0-a3c4-633fd6061207',
usernameVariable: 'DOCKERUSER',
passwordVariable: 'DOCKERPASS'
]
]) {
sh '''#! /bin/bash
docker pull lsiodev/readme-sync
docker run --rm=true \
-e DOCKERHUB_USERNAME=$DOCKERUSER \
-e DOCKERHUB_PASSWORD=$DOCKERPASS \
-e GIT_REPOSITORY=${LS_USER}/${LS_REPO} \
-e DOCKER_REPOSITORY=${IMAGE} \
-e GIT_BRANCH=master \
lsiodev/readme-sync bash -c 'node sync' '''
}
}
}
}
/* ######################
Send status to Discord
###################### */
post {
always {
script{
if (env.EXIT_STATUS == "ABORTED"){
sh 'echo "build aborted"'
}
else if (currentBuild.currentResult == "SUCCESS"){
sh ''' curl -X POST --data '{"avatar_url": "https://wiki.jenkins-ci.org/download/attachments/2916393/headshot.png","embeds": [{"color": 1681177,\
"description": "**Build:** '${BUILD_NUMBER}'\\n**CI Results:** '${CI_URL}'\\n**Status:** Success\\n**Job:** '${RUN_DISPLAY_URL}'\\n**Change:** '${CODE_URL}'\\n**External Release:**: '${RELEASE_LINK}'\\n**DockerHub:** '${DOCKERHUB_LINK}'\\n"}],\
"username": "Jenkins"}' ${BUILDS_DISCORD} '''
}
else {
sh ''' curl -X POST --data '{"avatar_url": "https://wiki.jenkins-ci.org/download/attachments/2916393/headshot.png","embeds": [{"color": 16711680,\
"description": "**Build:** '${BUILD_NUMBER}'\\n**CI Results:** '${CI_URL}'\\n**Status:** failure\\n**Job:** '${RUN_DISPLAY_URL}'\\n**Change:** '${CODE_URL}'\\n**External Release:**: '${RELEASE_LINK}'\\n**DockerHub:** '${DOCKERHUB_LINK}'\\n"}],\
"username": "Jenkins"}' ${BUILDS_DISCORD} '''
}
}
}
}
}
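The PACKAGE_CHECK parameter declared at the top of the pipeline is how scheduled rebuilds detect package drift: the Update-packages stage diffs the installed package list against package_versions.txt, and when PACKAGE_CHECK is 'true' with no changes, the PACKAGECHECK-exit stage aborts before anything is pushed. A hedged sketch of triggering such a run remotely, assuming a Jenkins instance exposing the standard buildWithParameters endpoint (the URL, job path and credentials below are placeholders):

```
curl -X POST \
  -u "jenkins-user:api-token" \
  "https://ci.example.com/job/docker-tvheadend/job/master/buildWithParameters?PACKAGE_CHECK=true"
```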

README.md

@@ -56,7 +56,7 @@ If you use IPTV, SAT>IP or HDHomeRun, you need to create the container with --ne
## Parameters
`The parameters are split into two halves, separated by a colon, the left hand side representing the host and the right the container side.
For example with a port -p external:internal - what this shows is the port mapping from internal to external of the container.
So -p 8080:80 would expose port 80 from inside the container to be accessible from the host's IP on port 8080
http://192.168.x.x:8080 would show you what's running INSIDE the container on port 80.`
@@ -109,7 +109,7 @@ The first thing to do is to run the setup wizard. If it doesn't pop up at first
**Configuring XMLTV grabber**
To configure the XMLTV grabber, first check if your grabber is listed in Configuration --> Channel/EPG --> EPG Grabber Modules. If it's listed, you will have to configure the grabber before enabling.
Find the path in the path field of your grabber. We will use the last part. It starts with tv_grab_. Add it after /usr/bin/ in the below command. There should be no space between /usr/bin/ and the part you added.
```
docker exec -it -u abc tvheadend /usr/bin/for_you_to_fill_out --configure
@@ -152,7 +152,7 @@ You need to enable minimum advanced view level to see the picons options.
* Shell access whilst the container is running: `docker exec -it tvheadend /bin/bash`
* To monitor the logs of the container in realtime: `docker logs -f tvheadend`
* container version number
`docker inspect -f '{{ index .Config.Labels "build_version" }}' tvheadend`
@@ -162,6 +162,7 @@ You need to enable minimum advanced view level to see the picons options.
## Versions
+ **15.01.19:** Add pipeline and multi arch logic.
+ **12.09.18:** Rebase to alpine 3.8 and use buildstage type build.
+ **21.04.18:** Add JSON::XS Perl package for grab_tv_huro.
+ **24.03.18:** Add dvbcsa package.

jenkins-vars.yml Normal file

@@ -0,0 +1,29 @@
---
# jenkins variables
project_name: docker-tvheadend
external_type: github_commit
release_type: stable
release_tag: latest
ls_branch: master
repo_vars:
- EXT_GIT_BRANCH = 'master'
- EXT_USER = 'tvheadend'
- EXT_REPO = 'tvheadend'
- BUILD_VERSION_ARG = 'TVHEADEND_COMMIT'
- LS_USER = 'linuxserver'
- LS_REPO = 'docker-tvheadend'
- CONTAINER_NAME = 'tvheadend'
- DOCKERHUB_IMAGE = 'linuxserver/tvheadend'
- DEV_DOCKERHUB_IMAGE = 'lsiodev/tvheadend'
- PR_DOCKERHUB_IMAGE = 'lspipepr/tvheadend'
- DIST_IMAGE = 'alpine'
- MULTIARCH='true'
- CI='true'
- CI_WEB='true'
- CI_PORT='9981'
- CI_SSL='false'
- CI_DELAY='120'
- CI_DOCKERENV='TZ=US/Pacific'
- CI_AUTH='user:password'
- CI_WEBPATH=''

readme-vars.yml Normal file

@@ -0,0 +1,197 @@
---
# project information
project_name: tvheadend
full_custom_readme: |
{% raw -%}
[linuxserverurl]: https://linuxserver.io
[forumurl]: https://forum.linuxserver.io
[ircurl]: https://www.linuxserver.io/irc/
[podcasturl]: https://www.linuxserver.io/podcast/
[appurl]: https://www.tvheadend.org/
[hub]: https://hub.docker.com/r/linuxserver/tvheadend/
[![linuxserver.io](https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/linuxserver_medium.png)][linuxserverurl]
The [LinuxServer.io][linuxserverurl] team brings you another container release featuring easy user mapping and community support. Find us for support at:
* [forum.linuxserver.io][forumurl]
* [IRC][ircurl] on freenode at `#linuxserver.io`
* [Podcast][podcasturl] covers everything to do with getting the most from your Linux Server plus a focus on all things Docker and containerisation!
# linuxserver/tvheadend
[![](https://images.microbadger.com/badges/version/linuxserver/tvheadend.svg)](https://microbadger.com/images/linuxserver/tvheadend "Get your own version badge on microbadger.com")[![](https://images.microbadger.com/badges/image/linuxserver/tvheadend.svg)](https://microbadger.com/images/linuxserver/tvheadend "Get your own image badge on microbadger.com")[![Docker Pulls](https://img.shields.io/docker/pulls/linuxserver/tvheadend.svg)][hub][![Docker Stars](https://img.shields.io/docker/stars/linuxserver/tvheadend.svg)][hub][![Build Status](https://ci.linuxserver.io/buildStatus/icon?job=Docker-Builders/x86-64/x86-64-tvheadend)](https://ci.linuxserver.io/job/Docker-Builders/job/x86-64/job/x86-64-tvheadend/)
[Tvheadend](https://www.tvheadend.org/) is a TV streaming server and recorder for Linux, FreeBSD and Android supporting DVB-S, DVB-S2, DVB-C, DVB-T, ATSC, ISDB-T, IPTV, SAT>IP and HDHomeRun as input sources.
Tvheadend offers the HTTP (VLC, MPlayer), HTSP (Kodi, Movian) and SAT>IP streaming.
Multiple EPG sources are supported (over-the-air DVB and ATSC including OpenTV DVB extensions, XMLTV, PyXML).
[![tvheadend](https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/tvheadend-big.png)][appurl]
## Usage
```
docker create \
--name=tvheadend \
--net=bridge \
-v <path to data>:/config \
-v <path to recordings>:/recordings \
-e PGID=<gid> -e PUID=<uid> \
-e RUN_OPTS=<parameter> \
-p 9981:9981 \
-p 9982:9982 \
--device=/dev/dvb \
--device=/dev/dri \
linuxserver/tvheadend
```
The --device=/dev/dvb is only needed if you want to pass through a DVB card to the container. If you use IPTV or HDHomeRun you can leave it out.
The --device=/dev/dri is only needed if you want to use your AMD/Intel GPU for hardware accelerated video encoding (vaapi).
You can choose, using tags, between latest (the default, no tag required) or a specific release branch of tvheadend.
Add one of the tags, if required, to the linuxserver/tvheadend line of the run/create command in the following format: linuxserver/tvheadend:release-4.2
#### Tags
+ **release-4.2** : latest release from 4.2 branch. Freshly built every Friday night UK time.
+ **stable-4.2.1** : old stable version. Will not be updated anymore!
+ **stable-4.0.9** : old stable version. Will not be updated anymore!
#### Host vs. Bridge
If you use IPTV, SAT>IP or HDHomeRun, you need to create the container with --net=host and remove the -p flags. This is because of a limitation in docker and multicast.
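As a sketch, the create command from the Usage section adjusted for host networking simply swaps the network mode and drops the -p flags (placeholders as above):

```
docker create \
  --name=tvheadend \
  --net=host \
  -v <path to data>:/config \
  -v <path to recordings>:/recordings \
  -e PGID=<gid> -e PUID=<uid> \
  -e RUN_OPTS=<parameter> \
  --device=/dev/dvb \
  --device=/dev/dri \
  linuxserver/tvheadend
```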
## Parameters
`The parameters are split into two halves, separated by a colon, the left hand side representing the host and the right the container side.
For example with a port -p external:internal - what this shows is the port mapping from internal to external of the container.
So -p 8080:80 would expose port 80 from inside the container to be accessible from the host's IP on port 8080
http://192.168.x.x:8080 would show you what's running INSIDE the container on port 80.`
* `-p 1234` - the port(s)
* `-v /config` - Where TVHeadend should store its config files
* `-v /recordings` - Where you want the PVR to store recordings
* `-e PGID` for GroupID - see below for explanation
* `-e PUID` for UserID - see below for explanation
* `-e RUN_OPTS` additional runtime parameters - see below for explanation
* `--device=/dev/dvb` - for passing through DVB-cards
* `--device=/dev/dri` - for passing through GPU
* `--net=host` - for IPTV, SAT>IP and HDHomeRun
* `-e TZ` - for timezone information *eg Europe/London, etc*
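Putting the parameters above together, an example create command with illustrative values (the paths, IDs and timezone are examples only) could look like:

```
docker create \
  --name=tvheadend \
  --net=bridge \
  -v /opt/appdata/tvheadend:/config \
  -v /mnt/recordings:/recordings \
  -e PGID=1000 -e PUID=1000 \
  -e TZ=Europe/London \
  -p 9981:9981 \
  -p 9982:9982 \
  linuxserver/tvheadend
```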
It is based on Alpine Linux with the s6 overlay; for shell access whilst the container is running do `docker exec -it tvheadend /bin/bash`.
### User / Group Identifiers
Sometimes when using data volumes (`-v` flags) permissions issues can arise between the host OS and the container. We avoid this issue by allowing you to specify the user `PUID` and group `PGID`. Ensure the data volume directory on the host is owned by the same user you specify and it will "just work" ™.
In this instance `PUID=1001` and `PGID=1001`. To find yours use `id user` as below:
```
$ id <dockeruser>
uid=1001(dockeruser) gid=1001(dockergroup) groups=1001(dockergroup)
```
## Additional runtime parameters
In some cases it might be necessary to start tvheadend with additional parameters, for example to enable debugging or to specify a webroot for a reverse proxy. Be sure to have the right parameters set, as adding the wrong ones might lead to the container not starting correctly.
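For illustration, and assuming your tvheadend build supports the --http_root flag for running behind a reverse proxy under a sub-path (check `tvheadend --help` for the options your version accepts), the variable could be added to the create command from the Usage section like this:

```
-e RUN_OPTS="--http_root /tvheadend"
```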
## Setting up the application
The setup depends on whether you run one of the stable tags or use latest. Running latest is the easiest as it has a setup wizard.
**Stable**
First thing to do is to go to Configuration --> DVB Inputs --> TV adapters and add your LNB/switch info. Then create a new network in the Networks tab and set the correct pre-defined muxes and orbital position.
Go back to the TV adapters tab and add the newly created network under universal LNB. Go back to the Networks tab and mark the network you created earlier and press the Force Scan button. Tvheadend will now scan the muxes for services.
After the scan is done, head to the Services tab and find the services you want as channels, mark them, and press map services. They should now appear under Configuration --> Channel/EPG.
**Latest**
The first thing to do is to run the setup wizard. If it doesn't pop up at first login, you can find it in Configuration --> General --> Base and click Start Wizard. This will guide you to set up the basic parts of tvheadend.
**Configuring XMLTV grabber**
To configure the XMLTV grabber, first check if your grabber is listed in Configuration --> Channel/EPG --> EPG Grabber Modules. If it's listed, you will have to configure the grabber before enabling.
Find the path in the path field of your grabber. We will use the last part. It starts with tv_grab_. Add it after /usr/bin/ in the below command. There should be no space between /usr/bin/ and the part you added.
```
docker exec -it -u abc tvheadend /usr/bin/for_you_to_fill_out --configure
```
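For instance, if the path field of your grabber ended in tv_grab_zz_sdjson (used here purely as an example name; substitute whatever your grabber is called), the command would be:

```
docker exec -it -u abc tvheadend /usr/bin/tv_grab_zz_sdjson --configure
```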
Now follow the onscreen progress. If you get asked about cache, just accept the default. After you have configured your grabber, you can go back and enable your grabber.
If you already have a configuration file, you can add it in the .xmltv folder where you mapped the /config volume. If it's not created, create it.
**Comskip**
This container comes with Comskip for commercial flagging of recordings. You have to add this in the recording configuration of tvheadend.
Go to Configuration --> Recording. Change the view level to advanced in the top right corner, and add the below in the Post-processor command field.
```
/usr/bin/comskip --ini=/config/comskip/comskip.ini "%f"
```
Now comskip will run after each recording is finished. You will find comskip.ini in the comskip folder of your /config volume mapping. See the [Comskip](http://www.kaashoek.com/comskip/) homepage for tuning of the ini file.
**FFmpeg**
FFmpeg is installed in /usr/bin/ in case you need to use it with pipe.
**EPG XML file**
If you have EPG data in XML format from a supplier, you can drop it in the data folder of your /config volume mapping. If it doesn't exist, create it. Then choose the XML file grabber in Configuration --> Channel/EPG --> EPG Grabber Modules.
If you use WebGrab+Plus, choose the WebGrab+Plus XML file grabber. The XML file goes in the same path as above.
The XML file has to be named guide.xml.
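As a sketch, with /config mapped to a host path as in the Usage section (the host path below is only an example), placing the file would look like:

```
mkdir -p /opt/appdata/tvheadend/data
cp guide.xml /opt/appdata/tvheadend/data/guide.xml
```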
For advanced setup of tvheadend, go to [Tvheadend][appurl]
**Picons**
We have added all the picons from [picons.xyz](https://picons.xyz/) in the folder /picons. To enable the use of these picons, add the path to the Channel icon path in Configuration --> General --> Base.
You need to enable minimum advanced view level to see the picons options.
## Info
* Shell access whilst the container is running: `docker exec -it tvheadend /bin/bash`
* To monitor the logs of the container in realtime: `docker logs -f tvheadend`
* container version number
`docker inspect -f '{{ index .Config.Labels "build_version" }}' tvheadend`
* image version number
`docker inspect -f '{{ index .Config.Labels "build_version" }}' linuxserver/tvheadend`
## Versions
+ **15.01.19:** Add pipeline and multi arch logic.
+ **12.09.18:** Rebase to alpine 3.8 and use buildstage type build.
+ **21.04.18:** Add JSON::XS Perl package for grab_tv_huro.
+ **24.03.18:** Add dvbcsa package.
+ **04.03.18:** Use sourceforge master rather than mirror for xmltv.
+ **22.02.18:** Add lost libva-intel-driver.
+ **21.02.18:** Fix wrong version of iconv used.
+ **18.02.18:** Add vaapi support, some cleanup and dropping of deprecated options.
+ **04.01.18:** Deprecate cpu_core routine lack of scaling.
+ **11.12.17:** Rebase to alpine 3.7, linting fixes.
+ **02.09.17:** Add codec dependencies.
+ **13.07.17:** Increase uniformity across all archs.
+ **08.07.17:** Update README with full path for comskip.
+ **02.07.17:** Move to one branch for all 4.2 releases.
+ **27.05.17:** Rebase to alpine 3.6.
+ **01.05.17:** Update to tvheadend 4.2.1 stable.
+ **18.04.17:** Use repo version of gnu-libiconv rather than compiling.
+ **09.04.17:** Chain cpanm installs in one block and use --installdeps.
+ **09.02.17:** Perl changes, add picons file to gitignore and update XMLTV to 0.5.69.
+ **07.02.17:** Add variable to add additional runtime paramters.
+ **05.02.17:** Update to alpine 3.5 and change dvb-apps to only compile needed libs.
+ **14.11.16:** Add picons from picons.xyz to /picons folder and add info to README.
+ **22.09.16:** Fix broken tv_grab_wg, libs for xmltv and update README.
+ **18.09.16:** Update XMLTV to 0.5.68 and update README.
+ **10.09.16:** Add layer badges to README.
+ **05.09.16:** Initial release.
{%- endraw %}