Mirror of https://github.com/LibreELEC/LibreELEC.tv.git (synced 2025-07-28 13:16:41 +00:00)
Merge pull request #4092 from MilhouseVH/le10_generator
buildsystem: switch to dynamic scheduler
This commit is contained in: commit 80f1b14da0
config/functions (108 changed lines)
@@ -775,6 +775,11 @@ do_autoreconf() {
   fi
 }
 
+# True if this is a sequential build, false if multithreaded
+is_sequential_build() {
+  [ "${MTWITHLOCKS}" != "yes" ] && return 0 || return 1
+}
+
 
 ### PACKAGE HELPERS ###
 # get variable ($2) for package ($1).
@@ -1380,48 +1385,43 @@ enable_service() {
 
 
 ### MULTI-THREADED FUNCTION HELPERS ###
-# Test MTWITHLOCKS so that these functions are a no-op during non-multithreaded builds.
+# flocks: 94 (pkg_lock_status)
+#         95 (scripts/pkgbuild)
+#         96 (acquire_exclusive_lock)
+#         97 (acquire_update_lock)
+#         98 (pkg_lock)
+#         99 (scripts/get)
 
-# Prevent concurrent modifications to a package (unpack) or
-# package:target (install/build).
+# Test build type so that these functions are a no-op during non-multithreaded builds.
+
+# Prevent concurrent modifications to a package during certain activities.
+# With dynamic scheduling we now only need to acquire the lock
+# during unpack and reconf, all other activities do not need to acquire a
+# lock as there should be no concurrent access however the existing code path
+# potentially generates useful logging for minimal cost so keep it.
 #
 # If a package is already locked and the owner is ourselves
 # then assume we already have the required lock.
 pkg_lock() {
-  [ "${MTWITHLOCKS}" != "yes" ] && return 0
+  is_sequential_build && return 0
 
   local pkg="$1" task="$2" parent_pkg="$3"
   local this_job="${MTJOBID}"
   local lock_job lock_seq lock_task lock_pkg locked=no idwidth
-  local fail_seq
 
-  exec 98>"${THREAD_CONTROL}/locks/${pkg}.${task}"
-  while [ : ]; do
-    read -r lock_job lock_seq lock_task lock_pkg <<<$(cat "${THREAD_CONTROL}/locks/${pkg}.${task}.owner" 2>/dev/null)
-    [ -n "${lock_job}" ] && break
-    flock --wait 1 --exclusive 98 && locked=yes && break
-  done
-
-  if [ "${locked}" = "no" -a "${lock_job}/${lock_seq}" != "${this_job}/${PARALLEL_SEQ}" ]; then
-    [ "${THREADCOUNT}" = "0" ] && idwidth=${#MTMAXJOBS} || idwidth=2
-    pkg_lock_status "STALLED" "${parent_pkg}" "${task}" "$(printf "waiting on [%0*d] %s %s" ${idwidth} ${lock_job} "${lock_task}" "${lock_pkg}")"
-    flock --exclusive 98
-  fi
-
-  # As we now have the lock, if .failed still exists then a previous process must have failed
-  if [ -f "${THREAD_CONTROL}/locks/${pkg}.${task}.failed" ]; then
-    fail_seq="$(< "${THREAD_CONTROL}/locks/${pkg}.${task}.failed")"
-    print_color CLR_ERROR "FAILURE: ${pkg}.${task}.failed exists, a previous dependency process has failed (seq: ${fail_seq})\n"
-    if [ -d "${THREAD_CONTROL}/logs" ]; then
-      cat <<EOF
-
-The following logs for this failure are available:
-  stdout: ${THREAD_CONTROL}/logs/${fail_seq}/stdout
-  stderr: ${THREAD_CONTROL}/logs/${fail_seq}/stderr
-
-EOF
-    fi
-    return 1
-  fi
+  if [ "${task}" = "unpack" -o "${task}" = "reconf" ]; then
+    exec 98>"${THREAD_CONTROL}/locks/${pkg}.${task}"
+    while [ : ]; do
+      read -r lock_job lock_seq lock_task lock_pkg <<<$(cat "${THREAD_CONTROL}/locks/${pkg}.${task}.owner" 2>/dev/null)
+      [ -n "${lock_job}" ] && break
+      flock --wait 1 --exclusive 98 && locked=yes && break
+    done
+
+    if [ "${locked}" = "no" -a "${lock_job}/${lock_seq}" != "${this_job}/${PARALLEL_SEQ}" ]; then
+      [ "${THREADCOUNT}" = "0" ] && idwidth=${#MTMAXJOBS} || idwidth=2
+      pkg_lock_status "STALLED" "${parent_pkg}" "${task}" "$(printf "waiting on [%0*d] %s %s" ${idwidth} ${lock_job} "${lock_task}" "${lock_pkg}")"
+      flock --exclusive 98
+    fi
+  fi
 
   pkg_lock_status "LOCKED" "${pkg}" "${task}"
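A note on the pattern above: pkg_lock combines an exclusive flock(1) on a dedicated file descriptor with a sidecar .owner file, so a re-entrant caller can recognise its own lock and a stalled job can report exactly whose lock it is waiting on. A minimal Python sketch of the same idea (paths and identifiers are illustrative; the build system itself does this in shell):

```python
# Minimal sketch of the lock-plus-owner-file pattern used by pkg_lock().
# Paths and identifiers are illustrative, not the build system's API.
import fcntl, os

def acquire_pkg_lock(lock_dir, pkg, task, job_id):
    lock_path = os.path.join(lock_dir, f"{pkg}.{task}")
    owner_path = lock_path + ".owner"

    # If the owner file already names us, we hold the lock (re-entrant case).
    try:
        with open(owner_path) as f:
            if f.read().split()[0] == str(job_id):
                return None
    except (FileNotFoundError, IndexError):
        pass

    fd = open(lock_path, "w")
    fcntl.flock(fd, fcntl.LOCK_EX)        # blocks until exclusive
    with open(owner_path, "w") as f:      # record ownership for diagnostics
        f.write(f"{job_id} {task} {pkg}\n")
    return fd                             # keep fd open to hold the lock
```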
@@ -1429,7 +1429,7 @@ EOF
 
 # Log additional information for a locked package.
 pkg_lock_status() {
-  [ "${MTWITHLOCKS}" != "yes" ] && return 0
+  is_sequential_build && return 0
 
   local status="$1" pkg="$2" task="$3" msg="$4"
   local this_job="${MTJOBID}" line idwidth
@@ -1439,6 +1439,12 @@ pkg_lock_status() {
   (
     flock --exclusive 94
 
+    # Write the configured number of slots to history to improve accuracy of later analysis
+    if [ ! -f "${THREAD_CONTROL}/history" ]; then
+      printf "%s: <%06d> [%0*d/%0*d] %-7s %-7s %s %s\n" \
+        "$(date +%Y-%m-%d\ %H:%M:%S.%N)" $$ ${idwidth} 0 ${#MTMAXJOBS} 0 "IDLE" "config" "info" "slots=${MTMAXSLOT};jobs=${MTMAXJOBS}" >>"${THREAD_CONTROL}/history"
+    fi
+
     printf -v line "%s: <%06d> [%0*d/%0*d] %-7s %-7s %-35s" \
       "$(date +%Y-%m-%d\ %H:%M:%S.%N)" $$ ${idwidth} ${this_job} ${#MTMAXJOBS} ${PARALLEL_SEQ:-0} "${status}" "${task}" "${pkg}"
     [ -n "${msg}" ] && line+=" (${msg})"
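Each history record written above is one fixed-width line: timestamp, <pid>, [job/seq], status, task, package and an optional message; the one-off "config info" record stores slots/jobs so that later analysis (tools/mtstats.py) knows the configured concurrency. A hedged sketch of pulling one such line apart (the sample line is illustrative):

```python
# Sketch of parsing one history line written by pkg_lock_status()
# (format per the printf above; the sample line is illustrative).
line = "2019-11-01 12:00:00.123456789: <012345> [01/08] LOCKED  build   linux"
stamp, rest = line.split(": <", 1)
pid, rest = rest.split("> ", 1)
slotseq, status, task, pkg = rest.split(None, 3)
print(stamp, int(pid), slotseq, status, task, pkg)
```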
@@ -1451,31 +1457,29 @@ pkg_lock_status() {
   ) 94>"${THREAD_CONTROL}/locks/.history"
 
   if [ "${status}" = "LOCKED" ]; then
-    echo "${PARALLEL_SEQ}" > "${THREAD_CONTROL}/locks/${pkg}.${task}.failed"
     echo "${this_job} ${PARALLEL_SEQ} ${task} ${pkg}" >"${THREAD_CONTROL}/locks/${pkg}.${task}.owner"
   elif [ "${status}" = "UNLOCK" ]; then
     rm "${THREAD_CONTROL}/locks/${pkg}.${task}.owner"
-    rm "${THREAD_CONTROL}/locks/${pkg}.${task}.failed"
   fi
 
   return 0
 }
 
 update_dashboard() {
-  [ "${MTWITHLOCKS}" != "yes" ] && return 0
+  is_sequential_build && return 0
 
   local status="$1" pkg="$2" task="$3" msg="$4"
-  local line sedline preamble num elapsed projdevarch
+  local line preamble num elapsed projdevarch
   local boldred boldgreen boldyellow endcolor idwidth
 
-  sedline=$((MTJOBID + 2))
-
-  [ "${THREADCOUNT}" = "0" ] && idwidth=${#MTMAXJOBS} || idwidth=2
+  [ "${THREADCOUNT}" = "0" ] && idwidth=${#MTMAXSLOT} || idwidth=2
 
-  num=$(< "${THREAD_CONTROL}/status.max")
-  if [ ${num} -lt ${sedline} ]; then
-    echo ${sedline} >"${THREAD_CONTROL}/status.max"
-    for i in $(seq $((num + 1)) ${sedline}); do echo "" >>"${THREAD_CONTROL}/status"; done
+  if [ ! -s ${THREAD_CONTROL}/status ]; then
+    echo "" >"${THREAD_CONTROL}/status"
+    echo "" >>"${THREAD_CONTROL}/status"
+    for i in $(seq 1 $((MTMAXSLOT))); do
+      printf "[%0*d/%0*d] %-7s\n" ${idwidth} ${i} ${#MTMAXJOBS} 0 "IDLE" >>"${THREAD_CONTROL}/status"
+    done
   fi
 
   num=$(< "${THREAD_CONTROL}/progress.prev")
@@ -1506,13 +1510,13 @@ update_dashboard() {
   printf -v line "[%0*d\/%0*d] %b%-7s%b %-7s %-35s" ${idwidth} ${MTJOBID} ${#MTMAXJOBS} ${PARALLEL_SEQ:-0} "${color}" "${status//\//\\/}" "${endcolor}" "${task}" "${pkg}"
   [ -n "${msg}" ] && line+=" ${msg//\//\\/}"
 
-  sed -e "1s/.*/${preamble}/;${sedline}s/.*/${line}/" -i "${THREAD_CONTROL}/status"
+  sed -e "1s/.*/${preamble}/;$((MTJOBID + 2))s/.*/${line}/" -i "${THREAD_CONTROL}/status"
 }
 
 # Thread concurrency helpers to avoid concurrency issues with some code,
 # eg. when Python installs directly into $TOOLCHAIN.
 acquire_exclusive_lock() {
-  [ "${MTWITHLOCKS}" != "yes" ] && return 0
+  is_sequential_build && return 0
 
   local pkg="$1" task="$2" lockfile="${3:-global}"
   local this_job="${MTJOBID}"
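The dashboard is likewise a plain text file: line 1 carries the preamble and job slot N lives on line N + 2, which is why the sed expression above rewrites exactly those two lines, and why the file is now pre-populated with one IDLE line per slot. A rough Python equivalent of the line replacement (illustrative, not the build system's API):

```python
# Sketch of the slot-per-line dashboard update (illustrative).
# Line 1 is the preamble; job slot N lives on line N + 2, and the
# file is assumed to be pre-populated with one line per slot (as above).
def update_status_line(path, slot, text):
    with open(path) as f:
        lines = f.read().splitlines()
    lines[slot + 1] = text          # slot N -> index N + 1 (line N + 2, 1-based)
    with open(path, "w") as f:
        f.write("\n".join(lines) + "\n")
```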
@@ -1537,7 +1541,7 @@ acquire_exclusive_lock() {
 }
 
 release_exclusive_lock() {
-  [ "${MTWITHLOCKS}" != "yes" ] && return 0
+  is_sequential_build && return 0
 
   local pkg="$1" task="$2" lockfile="${3:-global}"
 
@@ -1557,6 +1561,20 @@ exec_thread_safe() {
   return ${result}
 }
 
+# A lightweight target specific lock (eg. image, sysroot)
+acquire_update_lock() {
+  is_sequential_build && return 0
+
+  exec 97>"${THREAD_CONTROL}/locks/.update.${1}"
+  flock --exclusive 97
+}
+
+release_update_lock() {
+  is_sequential_build && return 0
+
+  flock --unlock 97 2>/dev/null
+}
+
 # Use distribution functions if any
 if [ -f "distributions/$DISTRO/config/functions" ]; then
   . distributions/$DISTRO/config/functions
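acquire_update_lock/release_update_lock serialize writers of a shared target (the image staging tree or the shared sysroot, as used further below) with nothing more than an exclusive flock on fd 97 against a named lock file; no owner bookkeeping is needed. A rough Python equivalent (names illustrative):

```python
# Rough equivalent of acquire_update_lock/release_update_lock (illustrative).
import fcntl

_update_fd = None

def acquire_update_lock(lock_dir, name):
    global _update_fd
    _update_fd = open(f"{lock_dir}/.update.{name}", "w")
    fcntl.flock(_update_fd, fcntl.LOCK_EX)   # block until we own the lock

def release_update_lock():
    global _update_fd
    if _update_fd:
        fcntl.flock(_update_fd, fcntl.LOCK_UN)
        _update_fd.close()
        _update_fd = None
```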
@@ -1,162 +1,62 @@
 # SPDX-License-Identifier: GPL-2.0
 # Copyright (C) 2019-present Team LibreELEC (https://libreelec.tv)
 
-THREADCOUNT=${THREADCOUNT:-100%}
+THREADCOUNT=${THREADCOUNT:-$(nproc)}
 
-# This function is passed a list of package.mk paths to be processed.
-# Each package.mk is sourced with relevant variables output in JSON format.
-json_worker() {
-  local packages="$@"
-  local pkgpath hierarchy exited
-
-  exit() { exited=1; }
-
-  . config/options ""
-
-  for pkgpath in ${packages}; do
-    pkgpath="${pkgpath%%@*}"
-
-    exited=0
-    if ! source_package "${pkgpath}/package.mk" &>/dev/null; then
-      unset -f exit
-      die "$(print_color CLR_ERROR "FAILURE: sourcing package ${pkgpath}/package.mk")"
-    fi
-
-    [ ${exited} -eq 1 ] && continue
-
-    [[ ${pkgpath} =~ ^${ROOT}/${PACKAGES}/ ]] && hierarchy="global" || hierarchy="local"
-
-    cat <<EOF
-  {
-    "name": "${PKG_NAME}",
-    "hierarchy": "${hierarchy}",
-    "section": "${PKG_SECTION}",
-    "bootstrap": "${PKG_DEPENDS_BOOTSTRAP}",
-    "init": "${PKG_DEPENDS_INIT}",
-    "host": "${PKG_DEPENDS_HOST}",
-    "target": "${PKG_DEPENDS_TARGET}"
-  },
-EOF
-  done
-}
-export -f json_worker
-
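The removed json_worker emitted one record per package, and that record shape is what feeds genbuildplan.py in the new pipeline. A sketch of the record, assuming illustrative values (field names match the heredoc above):

```python
# The per-package record consumed by the planner, mirroring the heredoc above.
# Values shown are illustrative.
import json

record = {
    "name": "linux",
    "hierarchy": "global",   # "global" for in-tree packages, else "local"
    "section": "linux",
    "bootstrap": "",         # PKG_DEPENDS_BOOTSTRAP
    "init": "",              # PKG_DEPENDS_INIT
    "host": "ccache:host rsync:host openssl:host",  # PKG_DEPENDS_HOST
    "target": "toolchain linux:host",               # PKG_DEPENDS_TARGET (truncated)
}
print(json.dumps(record, indent=2))
```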
-# This function is passed the build instruction for a single job.
-# The function will run either "build <package>" or "install <package>".
-# ${slot} is the job slot number, ie. 1-8 when THREADCOUNT=8.
-# ${job} is the sequence within the total number of ${jobs}.
-package_worker() {
-  local slot=$1 job=$2 jobs=$3 args="$4"
-  local task pkgname result status
-  local addon istarget isaddon
-
-  export MTJOBID=${slot} MTMAXJOBS=${jobs}
-
-  read -r task pkgname <<< "${args}"
-
-  . config/options "${pkgname}"
-
-  [ ! -f "${THREAD_CONTROL}/parallel.pid" ] && echo "${PARALLEL_PID}" >"${THREAD_CONTROL}/parallel.pid"
-
-  ${SCRIPTS}/${task} ${pkgname} 2>&1 && result=0 || result=1
-
-  [[ ${pkgname} =~ :target$ || "${pkgname//:/}" = "${pkgname}" ]] && istarget="yes" || istarget="no"
-
-  [[ "${MTADDONBUILD}" = "yes" && ( "${PKG_IS_ADDON}" = "yes" || "${PKG_IS_ADDON}" = "embedded" ) ]] && isaddon="yes" || isaddon="no"
-
-  if [ "${isaddon}" = "yes" -a "${istarget}" = "yes" ]; then
-    if [ ${result} -eq 0 ]; then
-      ${SCRIPTS}/install_addon ${pkgname} 2>&1 && result=0 || result=1
-    fi
-
-    if [ ${result} -ne 0 ]; then
-      if [ -d "${THREAD_CONTROL}/logs" ]; then
-        echo "${PKG_NAME} ${THREAD_CONTROL}/logs/${job}/stdout" >>"${THREAD_CONTROL}/addons.failed"
-      else
-        echo "${PKG_NAME}" >>"${THREAD_CONTROL}/addons.failed"
-      fi
-    fi
-  fi
-
-  (
-    flock --exclusive 95
-    [ ${result} -eq 0 ] && status="DONE" || status="FAIL"
-    num=$(< "${THREAD_CONTROL}/progress")
-    mv "${THREAD_CONTROL}/progress" "${THREAD_CONTROL}/progress.prev"
-    num=$((num + 1))
-    echo ${num} >"${THREAD_CONTROL}/progress"
-    printf "[%0*d/%0*d] [%-4s] %-7s %s\n" ${#jobs} ${num} ${#jobs} ${jobs} "${status}" "${task}" "${pkgname}" >&2
-  ) 95>"${THREAD_CONTROL}/locks/.progress"
-
-  if [ ${result} -eq 0 ]; then
-    pkg_lock_status "IDLE"
-  else
-    pkg_lock_status "FAILED" "${pkgname}" "${task}"
-
-    print_color CLR_ERROR "FAILURE: $SCRIPTS/${task} ${pkgname} has failed!\n"
-
-    if [ -d "${THREAD_CONTROL}/logs" ]; then
-      cat >&2 <<EOF
-
-The following logs for this failure are available:
-  stdout: ${THREAD_CONTROL}/logs/${job}/stdout
-  stderr: ${THREAD_CONTROL}/logs/${job}/stderr
-
-EOF
-    fi
-  fi
-
-  return ${result}
-}
-export -f package_worker
-
 start_multithread_build() {
-  local singlethread buildopts result=0
+  local buildopts result=0
 
   # init thread control folder
   rm -rf "${THREAD_CONTROL}"
   mkdir -p "${THREAD_CONTROL}/locks"
   echo -1 >"${THREAD_CONTROL}/progress.prev"
   echo 0 >"${THREAD_CONTROL}/progress"
-  echo 0 >"${THREAD_CONTROL}/status.max"
   touch "${THREAD_CONTROL}/status"
 
   # Increase file descriptors if building one thread/package
   [ "${THREADCOUNT}" = "0" ] && ulimit -n ${ULIMITN:-10240}
 
-  # Bootstrap GNU parallel
-  MTWITHLOCKS=no $SCRIPTS/build parallel:host 2>&1 || die "Unable to bootstrap parallel package"
-
-  # determine number of available slots for the given THREADCOUNT - optimise logging for single threaded builds
-  [ $(seq 1 32 | ${TOOLCHAIN}/bin/parallel --plain --no-notice --max-procs ${THREADCOUNT} echo {%} | sort -n | tail -1) -eq 1 ] && singlethread=yes || singlethread=no
-
   # create a single log file by default for a single threaded build (or the builder is a masochist)
-  if [ "${singlethread}" = "yes" -a "${ONELOG,,}" != "no" ] || [ "${ONELOG,,}" = "yes" ]; then
-    buildopts+=" --ungroup"
+  if [ ${THREADCOUNT} -eq 1 -a "${ONELOG,,}" != "no" ] || [ "${ONELOG,,}" = "yes" ]; then
+    buildopts+=" --no-log-burst"
   else
     mkdir -p "${THREAD_CONTROL}/logs"
-    buildopts+=" --group --results ${THREAD_CONTROL}/logs/{#}/"
+    buildopts+=" --log-burst"
   fi
+  buildopts+=" --log-combine ${LOGCOMBINE:-always}"
 
   # When building addons, don't halt on error - keep building all packages/addons
-  [ "${MTADDONBUILD}" = "yes" ] && buildopts+=" --halt never" || buildopts+=" --halt now,fail=1"
+  [ "${MTADDONBUILD}" = "yes" ] && buildopts+=" --continue-on-error" || buildopts+=" --halt-on-error"
 
+  [ "${MTVERBOSE}" = "yes" ] && buildopts+=" --verbose"
+  [ "${MTDEBUG}" = "yes" ] && buildopts+=" --debug"
+  if [ "${DISABLE_COLORS}" = "yes" ]; then
+    buildopts+=" --colors=never"
+  else
+    buildopts+=" --colors=${MTCOLORS:-auto}"
+  fi
+
+  buildopts+=" --stats-interval ${MTINTERVAL:-60}"
+
   # pipefail: return value of a pipeline is the value of the last (rightmost) command to exit with a non-zero status
   set -o pipefail
 
-  cat ${_CACHE_PACKAGE_GLOBAL} ${_CACHE_PACKAGE_LOCAL} | \
-    ${TOOLCHAIN}/bin/parallel --plain --no-notice --max-args 30 --halt now,fail=1 json_worker | \
-    ${SCRIPTS}/genbuildplan.py --no-reorder --show-wants --build ${@} > "${THREAD_CONTROL}"/plan || result=1
+  ${SCRIPTS}/pkgjson | ${SCRIPTS}/genbuildplan.py --show-wants --with-json "${THREAD_CONTROL}"/plan.json \
+    --build ${@} > "${THREAD_CONTROL}"/plan || result=1
 
   if [ ${result} -eq 0 ]; then
     save_build_config
 
-    cat "${THREAD_CONTROL}"/plan | awk '{print $1 " " $2}' | \
-      MTBUILDSTART=$(date +%s) MTWITHLOCKS=yes ${TOOLCHAIN}/bin/parallel \
-        --plain --no-notice --max-procs ${THREADCOUNT} --joblog="${THREAD_CONTROL}/joblog" --plus ${buildopts} \
-        package_worker {%} {#} {##} {} || result=1
+    # export the following vars so that they will be available to subprocesses of pkgbuilder.py
+    export ROOT SCRIPTS THREAD_CONTROL
 
-    [ -f "${THREAD_CONTROL}"/history ] && echo && cat "${THREAD_CONTROL}"/history | ${ROOT}/tools/mtstats.py
+    MTBUILDSTART=$(date +%s) MTWITHLOCKS=yes ${SCRIPTS}/pkgbuilder.py \
+      --plan "${THREAD_CONTROL}"/plan.json \
+      --joblog "${THREAD_CONTROL}"/joblog \
+      --loadstats "${THREAD_CONTROL}"/loadstats \
+      --max-procs ${THREADCOUNT} ${buildopts} || result=1
+
+    [ ${result} -eq 0 -a -f "${THREAD_CONTROL}"/history ] && echo && cat "${THREAD_CONTROL}"/history | ${ROOT}/tools/mtstats.py
 
     rm -f "${THREAD_CONTROL}/parallel.pid"
   fi
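The rewritten flow is two stages: genbuildplan.py resolves the package metadata into plan.json, where each step carries its task, name and dependency edges, and pkgbuilder.py then schedules steps dynamically as their dependencies complete, replacing the static GNU parallel job list. A deliberately simplified consumer of such a plan (the record shape matches the plan-writing code further below; the scheduling loop is a toy):

```python
# Simplified consumer of the plan.json written by genbuildplan.py:
# a list of {"task", "name", "deps"} records. Real scheduling is in pkgbuilder.py.
import json

with open("plan.json") as f:          # path illustrative
    plan = json.load(f)

built = set()
while plan:                           # assumes the plan is acyclic
    # pick any jobs whose dependencies are all complete (dynamic scheduling)
    ready = [j for j in plan if all(d in built for d in j["deps"])]
    for job in ready:
        print("run: %s %s" % (job["task"], job["name"]))
        built.add(job["name"])
        plan.remove(job)
```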
@@ -9,7 +9,7 @@ PKG_ARCH="any"
 PKG_LICENSE="nonfree"
 PKG_SITE="http://support.sundtek.com/"
 PKG_URL=""
-PKG_DEPENDS_TARGET="xmlstarlet:host"
+PKG_DEPENDS_TARGET="xmlstarlet:host p7zip:host"
 PKG_SECTION="driver/dvb"
 PKG_SHORTDESC="Sundtek MediaTV: a Linux driver to add support for SUNDTEK USB DVB devices"
 PKG_LONGDESC="Install this to add support for Sundtek USB DVB devices."
@@ -10,7 +10,7 @@ PKG_ARCH="any"
 PKG_LICENSE="OSS"
 PKG_SITE="https://libreelec.tv"
 PKG_URL="https://github.com/LibreELEC/script.config.vdr/archive/$PKG_VERSION.tar.gz"
-PKG_DEPENDS_TARGET="xmlstarlet:host"
+PKG_DEPENDS_TARGET="xmlstarlet:host p7zip:host"
 PKG_SECTION=""
 PKG_SHORTDESC="script.config.vdr"
 PKG_LONGDESC="script.config.vdr"
@@ -1,11 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-# Copyright (C) 2019-present Team LibreELEC (https://libreelec.tv)
-
-PKG_NAME="parallel"
-PKG_VERSION="20191022"
-PKG_SHA256="641beea4fb9afccb1969ac0fb43ebc458f375ceb6f7e24970a9aced463e909a9"
-PKG_LICENSE="GPLv3"
-PKG_SITE="https://www.gnu.org/software/parallel/"
-PKG_URL="http://ftpmirror.gnu.org/parallel/$PKG_NAME-$PKG_VERSION.tar.bz2"
-PKG_DEPENDS_HOST=""
-PKG_LONGDESC="GNU parallel is a shell tool for executing jobs in parallel using one or more computers."
@@ -6,7 +6,7 @@ PKG_NAME="linux"
 PKG_LICENSE="GPL"
 PKG_SITE="http://www.kernel.org"
 PKG_DEPENDS_HOST="ccache:host rsync:host openssl:host"
-PKG_DEPENDS_TARGET="toolchain linux:host cpio:host kmod:host xz:host wireless-regdb keyutils initramfs:init $KERNEL_EXTRA_DEPENDS_TARGET"
+PKG_DEPENDS_TARGET="toolchain linux:host cpio:host kmod:host xz:host wireless-regdb keyutils $KERNEL_EXTRA_DEPENDS_TARGET"
 PKG_NEED_UNPACK="$LINUX_DEPENDS $(get_pkg_directory initramfs) $(get_pkg_variable initramfs PKG_NEED_UNPACK)"
 PKG_LONGDESC="This package contains a precompiled kernel image and the modules."
 PKG_IS_KERNEL_PKG="yes"
@@ -57,6 +57,13 @@ if [[ "$KERNEL_TARGET" = uImage* ]]; then
   PKG_DEPENDS_TARGET="$PKG_DEPENDS_TARGET u-boot-tools:host"
 fi
 
+# Ensure that the dependencies of initramfs:target are built correctly, but
+# we don't want to add initramfs:target as a direct dependency as we install
+# this "manually" from within linux:target
+for pkg in $(get_pkg_variable initramfs PKG_DEPENDS_TARGET); do
+  ! listcontains "${PKG_DEPENDS_TARGET}" "${pkg}" && PKG_DEPENDS_TARGET+=" ${pkg}" || true
+done
+
 post_patch() {
   cp $PKG_KERNEL_CFG_FILE $PKG_BUILD/.config
 
@@ -63,9 +63,12 @@ case "${TARGET}" in
   "init") _pkg_depends="${PKG_DEPENDS_INIT}";;
   "bootstrap") _pkg_depends="${PKG_DEPENDS_BOOTSTRAP}";;
 esac
-for p in ${_pkg_depends}; do
-  ${SCRIPTS}/build "${p}" "${PARENT_PKG}"
-done
+
+if is_sequential_build; then
+  for p in ${_pkg_depends}; do
+    ${SCRIPTS}/build "${p}" "${PARENT_PKG}"
+  done
+fi
 
 # virtual packages are not built as they only contain dependencies, so dont go further here
 if [ "${PKG_SECTION}" = "virtual" ]; then
@@ -441,8 +444,13 @@ for i in $(find "${SYSROOT_PREFIX}" -type l 2>/dev/null); do
 done
 
 # Transfer the new sysroot content to the shared sysroot
+acquire_update_lock sysroot
+
 mkdir -p "${PKG_ORIG_SYSROOT_PREFIX}"
 cp -PRf "${SYSROOT_PREFIX}"/* "${PKG_ORIG_SYSROOT_PREFIX}"
+
+release_update_lock
+
 rm -rf "${SYSROOT_PREFIX}"
 
 export SYSROOT_PREFIX="${PKG_ORIG_SYSROOT_PREFIX}"
@@ -109,7 +109,7 @@ class Node:
         self.edges.append(node)
 
 def eprint(*args, **kwargs):
     print(*args, file=sys.stderr, **kwargs)
 
 # Read a JSON list of all possible packages from stdin
 def loadPackages():
@@ -178,7 +178,7 @@ def findbuildpos(node, list):
     return list.index(candidate) + 1 if candidate else -1
 
 # Resolve dependencies for a node
-def dep_resolve(node, resolved, unresolved, noreorder):
+def dep_resolve(node, resolved, unresolved):
     unresolved.append(node)
 
     for edge in node.edges:
@@ -186,19 +186,15 @@ def dep_resolve(node, resolved, unresolved, noreorder):
             if edge in unresolved:
                 raise Exception('Circular reference detected: %s -> %s\nRemove %s from %s package.mk::PKG_DEPENDS_%s' % \
                                 (node.fqname, edge.commonName(), edge.commonName(), node.name, node.target.upper()))
-            dep_resolve(edge, resolved, unresolved, noreorder)
+            dep_resolve(edge, resolved, unresolved)
 
     if node not in resolved:
-        pos = -1 if noreorder else findbuildpos(node, resolved)
-        if pos != -1:
-            resolved.insert(pos, node)
-        else:
-            resolved.append(node)
+        resolved.append(node)
 
     unresolved.remove(node)
 
 # Return a list of build steps for the trigger packages
-def get_build_steps(args, nodes, trigger_pkgs, built_pkgs):
+def get_build_steps(args, nodes):
     resolved = []
     unresolved = []
 
@@ -210,12 +206,12 @@ def get_build_steps(args, nodes, trigger_pkgs, built_pkgs):
     #
     install = True if "image" in args.build else False
 
-    for pkgname in [x for x in trigger_pkgs if x]:
+    for pkgname in [x for x in args.build if x]:
         if pkgname.find(":") == -1:
             pkgname = "%s:target" % pkgname
 
         if pkgname in nodes:
-            dep_resolve(nodes[pkgname], resolved, unresolved, args.no_reorder)
+            dep_resolve(nodes[pkgname], resolved, unresolved)
 
     # Abort if any references remain unresolved
     if unresolved != []:
@@ -226,14 +222,12 @@ def get_build_steps(args, nodes, trigger_pkgs, built_pkgs):
 
     # Output list of resolved dependencies
     for pkg in resolved:
-        if pkg.fqname not in built_pkgs:
-            built_pkgs.append(pkg.fqname)
-            task = "build" if pkg.fqname.endswith(":host") or pkg.fqname.endswith(":init") or not install else "install"
-            yield(task, pkg.fqname)
+        task = "build" if pkg.fqname.endswith(":host") or pkg.fqname.endswith(":init") or not install else "install"
+        yield(task, pkg.fqname)
 
 # Reduce the complete list of packages to a map of those packages that will
 # be needed for the build.
-def processPackages(args, packages, build):
+def processPackages(args, packages):
     # Add dummy package to ensure build/install dependencies are not culled
     pkg = {
             "name": ROOT_PKG,
@@ -241,8 +235,8 @@ def processPackages(args, packages, build):
             "hierarchy": "global",
            "bootstrap": "",
                 "init": "",
-               "host": " ".join(get_packages_by_target("host", build)),
-             "target": " ".join(get_packages_by_target("target", build))
+               "host": " ".join(get_packages_by_target("host", args.build)),
+             "target": " ".join(get_packages_by_target("target", args.build))
           }
 
     packages[pkg["name"]] = initPackage(pkg)
@@ -277,16 +271,17 @@ def processPackages(args, packages, build):
         needed_map[pkgname] = pkg
 
     # Validate package dependency references
-    for pkgname in needed_map:
-        pkg = needed_map[pkgname]
-        for t in pkg.deps:
-            for d in pkg.deps[t]:
-                if split_package(d)[0] not in needed_map and not args.ignore_invalid:
-                    msg = 'Invalid package reference: dependency %s in package %s::PKG_DEPENDS_%s is not valid' % (d, pkgname, t.upper())
-                    if args.warn_invalid:
-                        eprint("WARNING: %s" % msg)
-                    else:
-                        raise Exception(msg)
+    if not args.ignore_invalid:
+        for pkgname in needed_map:
+            pkg = needed_map[pkgname]
+            for t in pkg.deps:
+                for d in pkg.deps[t]:
+                    if split_package(d)[0] not in needed_map:
+                        msg = 'Invalid package reference: dependency %s in package %s::PKG_DEPENDS_%s is not valid' % (d, pkgname, t.upper())
+                        if args.warn_invalid:
+                            eprint("WARNING: %s" % msg)
+                        else:
+                            raise Exception(msg)
 
     node_map = {}
 
@@ -336,51 +331,53 @@ parser = argparse.ArgumentParser(description="Generate package dependency list f
 parser.add_argument("-b", "--build", nargs="+", metavar="PACKAGE", required=True, \
                     help="Space-separated list of build trigger packages, either for host or target. Required property - specify at least one package.")
 
-parser.add_argument("--warn-invalid", action="store_true", \
+parser.add_argument("--warn-invalid", action="store_true", default=False, \
                     help="Warn about invalid/missing dependency packages, perhaps excluded by a PKG_ARCH incompatability. Default is to abort.")
 
-parser.add_argument("--no-reorder", action="store_true", default="True", \
-                    help="Do not resequence steps based on dependencies. This is the default.")
-
-parser.add_argument("--reorder", action="store_false", dest="no_reorder", \
-                    help="Disable --no-reorder and resequence packages to try and reduce stalls etc.")
-
-parser.add_argument("--show-wants", action="store_true", \
-                    help="Output \"wants\" dependencies for each step.")
-
-parser.add_argument("--hide-wants", action="store_false", dest="show_wants", default="True", \
-                    help="Disable --show-wants.")
-
-parser.add_argument("--ignore-invalid", action="store_true", \
+parser.add_argument("--ignore-invalid", action="store_true", default=False, \
                     help="Ignore invalid packages.")
 
+group = parser.add_mutually_exclusive_group()
+group.add_argument("--show-wants", action="store_true", \
+                   help="Output \"wants\" dependencies for each step.")
+group.add_argument("--hide-wants", action="store_false", dest="show_wants", default=True, \
+                   help="Disable --show-wants. This is the default.")
+
+parser.add_argument("--with-json", metavar="FILE", \
+                    help="File into which JSON formatted plan will be written.")
+
 args = parser.parse_args()
 
 ALL_PACKAGES = loadPackages()
 
 loaded = len(ALL_PACKAGES)
 
-REQUIRED_PKGS = processPackages(args, ALL_PACKAGES, args.build)
+REQUIRED_PKGS = processPackages(args, ALL_PACKAGES)
 
-# Output list of packages to build/install
-built_pkgs = []
-steps = []
-
-for step in get_build_steps(args, REQUIRED_PKGS, args.build, built_pkgs):
-    steps.append(step)
+# Identify list of packages to build/install
+steps = [step for step in get_build_steps(args, REQUIRED_PKGS)]
 
 eprint("Packages loaded : %d" % loaded)
 eprint("Build trigger(s): %d [%s]" % (len(args.build), " ".join(args.build)))
 eprint("Package steps   : %d" % len(steps))
 eprint("")
 
+# Write the JSON build plan (with dependencies)
+if args.with_json:
+    plan = []
+    for step in steps:
+        plan.append({"task": step[0],
                     "name": step[1],
                     "deps": [d.fqname for d in REQUIRED_PKGS[step[1]].edges]})
+
+    with open(args.with_json, "w") as out:
+        print(json.dumps(plan, indent=2, sort_keys=False), file=out)
+
 # Output build/install steps
 if args.show_wants:
     for step in steps:
-        wants = []
         node = (REQUIRED_PKGS[step[1]])
-        for e in node.edges:
-            wants.append(e.fqname)
+        wants = [edge.fqname for edge in node.edges]
         print("%-7s %-25s (wants: %s)" % (step[0], step[1].replace(":target",""), ", ".join(wants).replace(":target","")))
 else:
     for step in steps:
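dep_resolve above is a post-order depth-first walk: a node is appended only after all of its edges, so the resolved list is a valid topological order, and meeting a node that is still on the unresolved stack means a dependency cycle. A compact standalone sketch of the same algorithm (toy package names; nodes simplified to strings):

```python
# Standalone sketch of the post-order dependency walk in dep_resolve()
# (simplified: nodes are strings, edges given as a dict).
def resolve(name, edges, resolved, unresolved):
    unresolved.append(name)
    for dep in edges.get(name, []):
        if dep not in resolved:
            if dep in unresolved:
                raise Exception("Circular reference detected: %s -> %s" % (name, dep))
            resolve(dep, edges, resolved, unresolved)
    if name not in resolved:
        resolved.append(name)
    unresolved.remove(name)

edges = {"image": ["linux", "busybox"], "linux": ["toolchain"], "busybox": ["toolchain"]}
order = []
resolve("image", edges, order, [])
print(order)   # ['toolchain', 'linux', 'busybox', 'image']
```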
@@ -330,7 +330,7 @@ if [ "${1}" = "release" -o "${1}" = "mkimage" -o "${1}" = "noobs" ]; then
 
   # Re-install u-boot package
   rm ${STAMPS_INSTALL}/u-boot/install_target
-  UBOOT_SYSTEM="${UBOOT_SYSTEM}" ${SCRIPTS}/install u-boot
+  UBOOT_SYSTEM="${UBOOT_SYSTEM}" ${SCRIPTS}/install u-boot 2>&1
 
   # Re-run bootloader/release
   if find_file_path bootloader/release ${BOOTLOADER_DIR}/release; then
@@ -45,14 +45,18 @@ mkdir -p ${STAMPS_INSTALL}/${PKG_NAME}
 
 ${SCRIPTS}/build "${1}" "${PARENT_PKG}"
 
-if [ "${TARGET}" = "target" ] ; then
-  for p in ${PKG_DEPENDS_TARGET}; do
-    ${SCRIPTS}/install "${p}" "${PARENT_PKG}"
-  done
-elif [ "${TARGET}" = "init" ] ; then
-  for p in ${PKG_DEPENDS_INIT}; do
-    ${SCRIPTS}/install "${p}" "${PARENT_PKG}"
-  done
+if is_sequential_build || [ "${PARENT_PKG}" = "initramfs:target" ]; then
+  if [ "${TARGET}" = "target" ] ; then
+    for p in ${PKG_DEPENDS_TARGET}; do
+      ${SCRIPTS}/install "${p}" "${PARENT_PKG}"
+    done
+  elif [ "${TARGET}" = "init" ] ; then
+    for p in ${PKG_DEPENDS_INIT}; do
+      ${SCRIPTS}/install "${p}" "${PARENT_PKG}"
+    done
+  fi
+fi
+if [ "${TARGET}" = "init" ] ; then
   INSTALL=${BUILD}/initramfs
 fi
 
@@ -60,6 +64,8 @@ pkg_lock_status "ACTIVE" "${PKG_NAME}:${TARGET}" "install"
 
 build_msg "CLR_INSTALL" "INSTALL" "${PKG_NAME} $(print_color CLR_TARGET "(${TARGET})")" "indent"
 
+acquire_update_lock image
+
 mkdir -p ${INSTALL}
 
 if [ "${TARGET}" = "target" ] ; then
@@ -165,6 +171,8 @@ if [ "${TARGET}" = "target" ] ; then
   pkg_call_exists post_install && pkg_call post_install
 fi
 
+release_update_lock
+
 touch ${STAMP}
 
 pkg_lock_status "UNLOCK" "${PKG_NAME}:${TARGET}" "install" "installed"
scripts/pkgbuild (new executable file, 79 lines)
@@ -0,0 +1,79 @@
+#!/bin/bash
+
+# This function is passed the build instruction for a single job.
+# The function will run either "build <package>" or "install <package>".
+# ${slot} is the job slot number, ie. 1-8 when THREADCOUNT=8.
+# ${job} is the sequence within the total number of ${jobs}.
+package_worker() {
+  local slot=$1 job=$2 jobs=$3 maxslot=$4 task="$5" pkgname="$6" oseqinfo="$7"
+  local result status
+  local addon istarget isaddon
+
+  export MTJOBID=${slot} PARALLEL_SEQ=${job} MTMAXJOBS=${jobs} MTMAXSLOT=${maxslot}
+
+  . config/options "${pkgname}"
+
+  if [ -z "${oseqinfo}" ]; then
+    ${SCRIPTS}/${task} ${pkgname} 2>&1 && result=0 || result=1
+  else
+    print_color CLR_ERROR "FAILURE [${task} ${pkgname}]: a previous dependency process has already failed!"
+    echo
+    echo
+
+    num=0
+    for failed_items in ${oseqinfo//;/ }; do
+      num=$((num + 1))
+      read -r ftask fpkgname fseq <<< "${failed_items//,/ }"
+
+      if [ -n "${fseq}" ]; then
+        [ ${num} -eq 1 ] && echo "The following log(s) for already failed dependencies are available:"
+        printf "  %-7s %s => %s\n" "${ftask}" "${fpkgname}" "${THREAD_CONTROL}/logs/${fseq}.log"
+      else
+        print_color CLR_ERROR "ALREADY FAILED [${ftask} ${fpkg}]"
+        echo
+      fi
+    done
+    echo
+    result=1
+  fi
+
+  [[ ${pkgname} =~ :target$ || "${pkgname//:/}" = "${pkgname}" ]] && istarget="yes" || istarget="no"
+
+  [[ "${MTADDONBUILD}" = "yes" && ( "${PKG_IS_ADDON}" = "yes" || "${PKG_IS_ADDON}" = "embedded" ) ]] && isaddon="yes" || isaddon="no"
+
+  if [ "${isaddon}" = "yes" -a "${istarget}" = "yes" ]; then
+    if [ ${result} -eq 0 ]; then
+      ${SCRIPTS}/install_addon ${pkgname} 2>&1 && result=0 || result=1
+    fi
+
+    if [ ${result} -ne 0 ]; then
+      if [ -d "${THREAD_CONTROL}/logs" ]; then
+        echo "${PKG_NAME} ${THREAD_CONTROL}/logs/${job}.log" >>"${THREAD_CONTROL}/addons.failed"
+      else
+        echo "${PKG_NAME}" >>"${THREAD_CONTROL}/addons.failed"
+      fi
+    fi
+  fi
+
+  (
+    flock --exclusive 95
+    [ ${result} -eq 0 ] && status="DONE" || status="FAIL"
+    num=$(< "${THREAD_CONTROL}/progress")
+    mv "${THREAD_CONTROL}/progress" "${THREAD_CONTROL}/progress.prev"
+    num=$((num + 1))
+    echo ${num} >"${THREAD_CONTROL}/progress"
+  ) 95>"${THREAD_CONTROL}/locks/.progress"
+
+  if [ ${result} -eq 0 ]; then
+    pkg_lock_status "IDLE"
+  else
+    pkg_lock_status "FAILED" "${pkgname}" "${task}"
+
+    print_color CLR_ERROR "FAILURE: $SCRIPTS/${task} ${pkgname} has failed!"
+    echo
+  fi
+
+  return ${result}
+}
+
+package_worker "$1" "$2" "$3" "$4" "$5" "$6" "$7"
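The progress counter in pkgbuild is shared by every worker process, so it is read, moved to progress.prev and rewritten only while holding the exclusive flock on fd 95. A rough Python equivalent of that critical section (paths illustrative):

```python
# Rough equivalent of the flock-95 progress update (paths illustrative).
import fcntl, os, shutil

def bump_progress(ctl):
    with open(os.path.join(ctl, "locks", ".progress"), "w") as lockfd:
        fcntl.flock(lockfd, fcntl.LOCK_EX)       # serialize all workers
        with open(os.path.join(ctl, "progress")) as f:
            num = int(f.read())
        shutil.move(os.path.join(ctl, "progress"),
                    os.path.join(ctl, "progress.prev"))
        with open(os.path.join(ctl, "progress"), "w") as f:
            f.write(str(num + 1))
        # lock released when lockfd closes
```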
scripts/pkgbuilder.py (new executable file, 628 lines)
@@ -0,0 +1,628 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) 2019-present Team LibreELEC (https://libreelec.tv)
+
+import sys
+import os
+import datetime, time
+import argparse
+import json
+import codecs
+import copy
+import threading
+import queue
+import subprocess
+import multiprocessing
+
+# Ensure we can output any old crap to stdout and stderr
+sys.stdout = codecs.getwriter("utf-8")(sys.stdout.detach())
+sys.stderr = codecs.getwriter("utf-8")(sys.stderr.detach())
+
+# derive from subprocess to utilize wait4() for rusage stats
+class RusagePopen(subprocess.Popen):
+    def _try_wait(self, wait_flags):
+        try:
+            (pid, sts, ru) = os.wait4(self.pid, wait_flags)
+        except OSError as e:
+            if e.errno != errno.ECHILD:
+                raise
+            pid = self.pid
+            sts = 0
+        else:
+            self.rusage = ru
+        return (pid, sts)
+
+def rusage_run(*popenargs, timeout=None, **kwargs):
+    with RusagePopen(*popenargs, **kwargs) as process:
+        try:
+            stdout, stderr = process.communicate(None, timeout=timeout)
+        except subprocess.TimeoutExpired as exc:
+            process.kill()
+            process.wait()
+            raise
+        except:
+            process.kill()
+            raise
+        retcode = process.poll()
+        res = subprocess.CompletedProcess(process.args, retcode, stdout, stderr)
+        res.rusage = process.rusage
+        return res
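os.wait4() hands back a resource.struct_rusage alongside the pid and status, which is how rusage_run() obtains per-job CPU times with no extra instrumentation. A usage sketch following the definitions above (the command is illustrative; ru_utime/ru_stime are standard struct_rusage fields):

```python
# Usage sketch for rusage_run() above: per-job CPU accounting via os.wait4().
import subprocess

res = rusage_run(["sleep", "1"], stdout=subprocess.PIPE)
print("exit:", res.returncode)
print("user CPU: %.3fs, system CPU: %.3fs" % (res.rusage.ru_utime, res.rusage.ru_stime))
```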
+class GeneratorEmpty(Exception):
+    pass
+
+class GeneratorStalled(Exception):
+    pass
+
+class Generator:
+    def __init__(self, plan):
+        self.plan = plan
+
+        self.work = copy.deepcopy(self.plan)
+        self.building = {}
+        self.built = {}
+        self.failed = {}
+
+        self.check_no_deps = True
+
+    def canBuildJob(self, job):
+        for dep in job["deps"]:
+            if dep not in self.built:
+                return False
+
+        return True
+
+    def getFirstFailedJob(self, job):
+        for dep in job["deps"]:
+            if dep in self.failed:
+                failedjob = self.getFirstFailedJob(self.failed[dep])
+                if not failedjob:
+                    return self.failed[dep]
+                else:
+                    return failedjob
+
+        return None
+
+    def getAllFailedJobs(self, job):
+        flist = {}
+        for dep in job["deps"]:
+            if dep in self.failed:
+                failedjob = self.getFirstFailedJob(self.failed[dep])
+                if failedjob:
+                    flist[failedjob["name"]] = failedjob
+                else:
+                    flist[dep] = self.failed[dep]
+
+        return [flist[x] for x in flist]
+
+    def getNextJob(self):
+        if self.work == []:
+            raise GeneratorEmpty
+
+        # Always process jobs without dependencies first
+        # until we're sure there's none left...
+        if self.check_no_deps:
+            for i, job in enumerate(self.work):
+                if job["deps"] == []:
+                    self.building[job["name"]] = True
+                    del self.work[i]
+                    job["failedjobs"] = self.getAllFailedJobs(job)
+                    job["logfile"] = None
+                    job["cmdproc"] = None
+                    job["failed"] = False
+                    return job
+
+            self.check_no_deps = False
+
+        # Process remaining jobs, trying to schedule
+        # only those jobs with all their dependencies satisifed
+        for i, job in enumerate(self.work):
+            if self.canBuildJob(job):
+                self.building[job["name"]] = True
+                del self.work[i]
+                job["failedjobs"] = self.getAllFailedJobs(job)
+                job["logfile"] = None
+                job["cmdproc"] = None
+                job["failed"] = False
+                return job
+
+        raise GeneratorStalled
+
+    # Return details about stalled jobs that can't build until the
+    # currently building jobs are complete.
+    def getStallInfo(self):
+        for job in self.work:
+            for dep in job["deps"]:
+                if dep not in self.building and dep not in self.built:
+                    break
+            else:
+                yield (job["name"], [d for d in job["deps"] if d in self.building])
+
+    def activeJobCount(self):
+        return len(self.building)
+
+    def activeJobNames(self):
+        for name in self.building:
+            yield name
+
+    def failedJobCount(self):
+        return len(self.failed)
+
+    def totalJobCount(self):
+        return len(self.plan)
+
+    def completed(self, job):
+        del self.building[job["name"]]
+        self.built[job["name"]] = job
+        if job["failed"]:
+            self.failed[job["name"]] = job
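Generator is the dynamic scheduler proper: getNextJob() hands out any job whose deps are all in built, raises GeneratorStalled while jobs remain but none are runnable, and GeneratorEmpty once the plan is exhausted. A minimal single-threaded driver showing the intended call pattern (illustrative plan; the real Builder below dispatches jobs to worker threads instead):

```python
# Minimal single-threaded driver for the Generator above (illustrative plan).
plan = [
    {"task": "build",   "name": "toolchain", "deps": []},
    {"task": "build",   "name": "linux",     "deps": ["toolchain"]},
    {"task": "install", "name": "image",     "deps": ["linux"]},
]
gen = Generator(plan)
while True:
    try:
        job = gen.getNextJob()
    except GeneratorStalled:
        continue            # with real workers: wait for a completion, then retry
    except GeneratorEmpty:
        break
    print("run:", job["task"], job["name"])
    job["failed"] = False   # mark success so dependants become runnable
    gen.completed(job)
```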
|
class BuildProcess(threading.Thread):
|
||||||
|
def __init__(self, slot, maxslot, jobtotal, haltonerror, work, complete):
|
||||||
|
threading.Thread.__init__(self, daemon=True)
|
||||||
|
|
||||||
|
self.slot = slot
|
||||||
|
self.maxslot = maxslot
|
||||||
|
self.jobtotal = jobtotal
|
||||||
|
self.haltonerror = haltonerror
|
||||||
|
self.work = work
|
||||||
|
self.complete = complete
|
||||||
|
|
||||||
|
self.active = False
|
||||||
|
|
||||||
|
self.stopping = False
|
||||||
|
|
||||||
|
def stop(self):
|
||||||
|
self.stopping = True
|
||||||
|
self.work.put(None)
|
||||||
|
|
||||||
|
def isActive(self):
|
||||||
|
return self.active == True
|
||||||
|
|
||||||
|
def run(self):
|
||||||
|
while not self.stopping:
|
||||||
|
job = self.work.get(block=True)
|
||||||
|
if job == None or self.stopping:
|
||||||
|
break
|
||||||
|
|
||||||
|
self.active = True
|
||||||
|
|
||||||
|
job["slot"] = self.slot
|
||||||
|
job["failed"] = self.execute(job)
|
||||||
|
job["status"] = "FAIL" if job["failed"] else "DONE"
|
||||||
|
self.complete.put(job)
|
||||||
|
|
||||||
|
self.active = False
|
||||||
|
|
||||||
|
if job["failed"] and self.haltonerror:
|
||||||
|
break
|
||||||
|
|
||||||
|
def execute(self, job):
|
||||||
|
if job["failedjobs"]:
|
||||||
|
flist = []
|
||||||
|
for fjob in job["failedjobs"]:
|
||||||
|
failedinfo = "%s,%s" % (fjob["task"], fjob["name"])
|
||||||
|
if fjob["logfile"]:
|
||||||
|
failedinfo = "%s,%s" % (failedinfo, fjob["seq"])
|
||||||
|
flist.append(failedinfo)
|
||||||
|
failedinfo = ";".join(flist)
|
||||||
|
else:
|
||||||
|
failedinfo = ""
|
||||||
|
|
||||||
|
job["args"] = ["%s/%s/pkgbuild" % (ROOT, SCRIPTS),
|
||||||
|
"%d" % self.slot, "%d" % job["seq"], "%d" % self.jobtotal, "%d" % self.maxslot,
|
||||||
|
job["task"], job["name"], failedinfo]
|
||||||
|
|
||||||
|
job["start"] = time.time()
|
||||||
|
returncode = 1
|
||||||
|
try:
|
||||||
|
if job["logfile"]:
|
||||||
|
with open(job["logfile"], "w") as logfile:
|
||||||
|
cmd = rusage_run(job["args"], cwd=ROOT,
|
||||||
|
stdin=subprocess.PIPE, stdout=logfile, stderr=subprocess.STDOUT,
|
||||||
|
universal_newlines=True, shell=False)
|
||||||
|
returncode = cmd.returncode
|
||||||
|
job["cmdproc" ] = cmd
|
||||||
|
else:
|
||||||
|
try:
|
||||||
|
cmd = rusage_run(job["args"], cwd=ROOT,
|
||||||
|
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
|
||||||
|
universal_newlines=True, shell=False,
|
||||||
|
encoding="utf-8", errors="replace")
|
||||||
|
returncode = cmd.returncode
|
||||||
|
job["cmdproc" ] = cmd
|
||||||
|
except UnicodeDecodeError:
|
||||||
|
print('\nPKGBUILDER ERROR: UnicodeDecodeError while reading cmd.stdout from "%s %s"\n' % (job["task"], job["name"]), file=sys.stderr, flush=True)
|
||||||
|
except Exception as e:
|
||||||
|
print("\nPKGBUILDER ERROR: %s exception while executing: %s\n" % (str(e), job["args"]), file=sys.stderr, flush=True)
|
||||||
|
|
||||||
|
job["end"] = time.time()
|
||||||
|
job["elapsed"] = job["end"] - job["start"]
|
||||||
|
|
||||||
|
if job["cmdproc"]:
|
||||||
|
job["utime"] = job["cmdproc"].rusage.ru_utime
|
||||||
|
job["stime"] = job["cmdproc"].rusage.ru_stime
|
||||||
|
else:
|
||||||
|
job["utime"] = job["stime"] = 0
|
||||||
|
|
||||||
|
if job["elapsed"] == 0.0:
|
||||||
|
job["cpu"] = 0.0
|
||||||
|
else:
|
||||||
|
job["cpu"] = round((job["utime"] + job["stime"]) * 100 / job["elapsed"])
|
||||||
|
|
||||||
|
return (returncode != 0)
|
||||||
|
|
||||||
|
class Builder:
|
||||||
|
def __init__(self, maxthreadcount, inputfilename, jobglog, loadstats, stats_interval, \
|
||||||
|
haltonerror=True, log_burst=True, log_combine="always", \
|
||||||
|
debug=False, verbose=False, colors=False):
|
||||||
|
if inputfilename == "-":
|
||||||
|
plan = json.load(sys.stdin)
|
||||||
|
else:
|
||||||
|
with open(inputfilename, "r") as infile:
|
||||||
|
plan = json.load(infile)
|
||||||
|
|
||||||
|
self.generator = Generator(plan)
|
||||||
|
|
||||||
|
self.joblog = jobglog
|
||||||
|
self.loadstats = loadstats
|
||||||
|
self.stats_interval = int(stats_interval)
|
||||||
|
if self.stats_interval < 1:
|
||||||
|
self.stats_interval = 60
|
||||||
|
|
||||||
|
self.haltonerror = haltonerror
|
||||||
|
self.log_burst = log_burst
|
||||||
|
self.log_combine = log_combine
|
||||||
|
self.debug = debug
|
||||||
|
self.verbose = verbose
|
||||||
|
|
||||||
|
self.colors = (colors == "always" or (colors == "auto" and sys.stderr.isatty()))
|
||||||
|
self.color_code = {}
|
||||||
|
self.color_code["DONE"] = "\033[0;32m" #green
|
||||||
|
self.color_code["FAIL"] = "\033[1;31m" #bold red
|
||||||
|
self.color_code["ACTV"] = "\033[0;33m" #yellow
|
||||||
|
self.color_code["IDLE"] = "\033[0;35m" #magenta
|
||||||
|
self.color_code["INIT"] = "\033[0;36m" #cyan
|
||||||
|
|
||||||
|
self.work = queue.Queue()
|
||||||
|
self.complete = queue.Queue()
|
||||||
|
|
||||||
|
self.jobtotal = self.generator.totalJobCount()
|
||||||
|
self.twidth = len("%d" % self.jobtotal)
|
||||||
|
|
||||||
|
self.joblogfile = None
|
||||||
|
self.loadstatsfile = None
|
||||||
|
self.nextstats = 0
|
||||||
|
|
||||||
|
self.build_start = 0
|
||||||
|
|
||||||
|
# work and completion sequences
|
||||||
|
self.cseq = 0
|
||||||
|
self.wseq = 0
|
||||||
|
|
||||||
|
# parse threadcount
|
||||||
|
if maxthreadcount.endswith("%"):
|
||||||
|
self.threadcount = int(multiprocessing.cpu_count() / 100 * int(args.max_procs.replace("%","")))
|
||||||
|
else:
|
||||||
|
if args.max_procs == "0":
|
||||||
|
self.threadcount = 256
|
||||||
|
else:
|
||||||
|
self.threadcount = int(maxthreadcount)
|
||||||
|
|
||||||
|
self.threadcount = 1 if self.threadcount < 1 else self.threadcount
|
||||||
|
self.threadcount = self.jobtotal if self.jobtotal <= self.threadcount else self.threadcount
|
||||||
|
|
||||||
|
if args.debug:
|
||||||
|
DEBUG("THREADCOUNT#: input arg: %s, computed: %d" % (maxthreadcount, self.threadcount))
|
||||||
|
|
||||||
|
# Init all processes
|
||||||
|
self.processes = []
|
||||||
|
for i in range(1, self.threadcount + 1):
|
||||||
|
self.processes.append(BuildProcess(i, self.threadcount, self.jobtotal, haltonerror, self.work, self.complete))
|
||||||
|
|
||||||
|
def build(self):
|
||||||
|
if self.joblog:
|
||||||
|
self.joblogfile = open(self.joblog, "w")
|
||||||
|
|
||||||
|
if self.loadstats:
|
||||||
|
self.loadstatsfile = open(self.loadstats, "w")
|
||||||
|
|
||||||
|
self.startProcesses()
|
||||||
|
|
||||||
|
self.build_start = time.time()
|
||||||
|
|
||||||
|
# Queue new work until no more work is available, and all queued jobs have completed.
|
||||||
|
while self.queueWork():
|
||||||
|
job = self.getCompletedJob()
|
||||||
|
|
||||||
|
self.writeJobLog(job)
|
||||||
|
self.processJobOutput(job)
|
||||||
|
self.displayJobStatus(job)
|
||||||
|
|
||||||
|
job["cmdproc"] = None
|
||||||
|
job = None
|
||||||
|
|
||||||
|
if self.generator.failedJobCount() != 0 and self.haltonerror:
|
||||||
|
break
|
||||||
|
|
||||||
|
self.captureStats(finished=True)
|
||||||
|
self.stopProcesses()
|
||||||
|
|
||||||
|
if self.joblogfile:
|
||||||
|
self.joblogfile.close()
|
||||||
|
|
||||||
|
if self.loadstatsfile:
|
||||||
|
self.loadstatsfile.close()
|
||||||
|
|
||||||
|
return (self.generator.failedJobCount() == 0)
|
||||||
|
|
||||||
|
    # Fill work queue with enough jobs to keep all processes busy.
    # Return True while jobs remain available to build, or queued jobs are still building.
    # Return False once all jobs have been queued, and finished building.
    def queueWork(self):
        try:
            for i in range(self.generator.activeJobCount(), self.threadcount):
                job = self.generator.getNextJob()

                if self.verbose:
                    self.vprint("INIT", "submit", job["name"])

                if self.debug:
                    DEBUG("Queueing Job: %s %s" % (job["task"], job["name"]))

                self.wseq += 1
                job["seq"] = self.wseq
                if self.log_burst:
                    job["logfile"] = "%s/logs/%d.log" % (THREAD_CONTROL, job["seq"])

                self.work.put(job)

            if self.verbose:
                self.vprint("ACTV", "active", ", ".join(self.generator.activeJobNames()))

            if self.debug:
                freeslots = self.threadcount - self.generator.activeJobCount()
                DEBUG("Building Now: %d active, %d idle [%s]" % (self.generator.activeJobCount(), freeslots, ", ".join(self.generator.activeJobNames())))

        except GeneratorStalled:
            if self.verbose:
                freeslots = self.threadcount - self.generator.activeJobCount()
                pending = []
                for (i, (package, wants)) in enumerate(self.generator.getStallInfo()):
                    pending.append("%s (wants: %s)" % (package, ", ".join(wants)))
                self.vprint("ACTV", "active", ", ".join(self.generator.activeJobNames()))
                self.vprint("IDLE", "stalled", "; ".join(pending), p1=len(pending))

            if self.debug:
                freeslots = self.threadcount - self.generator.activeJobCount()
                DEBUG("Building Now: %d active, %d idle [%s]" % (self.generator.activeJobCount(), freeslots, ", ".join(self.generator.activeJobNames())))
                for (i, (package, wants)) in enumerate(self.generator.getStallInfo()):
                    item = "%-25s wants: %s" % (package, ", ".join(wants))
                    if i == 0:
                        DEBUG("Stalled Jobs: %s" % item)
                    else:
                        DEBUG("              %s" % item)

        except GeneratorEmpty:
            if self.generator.activeJobCount() == 0:
                if self.debug:
                    DEBUG("NO MORE JOBS: All jobs have completed - exiting.")
                return False
            else:
                if self.debug:
                    n = self.generator.activeJobCount()
                    DEBUG("NO MORE JOBS: Waiting on %d job%s to complete..." % (n, ["s",""][n == 1]))

        return True

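    # Descriptive note (an assumption, based on the calls above): queueWork
    # expects self.generator to expose getNextJob(), completed(),
    # activeJobCount(), activeJobNames(), failedJobCount() and getStallInfo(),
    # and to signal scheduler state via exceptions rather than return codes:
    # GeneratorStalled means jobs remain but all are blocked on dependencies,
    # GeneratorEmpty means nothing is left to dispatch. The concrete generator
    # class is presumably defined earlier in the script (not shown in this
    # excerpt).
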
    # Wait until a new job is available
    def getCompletedJob(self):
        while True:
            try:
                job = self.complete.get(block=True, timeout=self.captureStats(finished=False))
                self.generator.completed(job)

                if self.debug:
                    DEBUG("Finished Job: %s %s [%s] after %0.3f seconds" % (job["task"], job["name"], job["status"], job["elapsed"]))

                return job

            except queue.Empty:
                pass

    def captureStats(self, finished=False):
        if not self.loadstatsfile:
            return None

        now = time.time()
        if now >= self.nextstats or finished:
            self.nextstats = int(now - (now % self.stats_interval)) + self.stats_interval

            loadavg = open("/proc/loadavg", "r").readline().split()
            procs = loadavg[3].split("/")
            meminfo = dict((i.split()[0].rstrip(':'), int(i.split()[1])) for i in open("/proc/meminfo", "r").readlines())

            print("%d %06d %5s %5s %5s %3s %4s %9d %2d %s" % (now, now - self.build_start, \
                  loadavg[0], loadavg[1], loadavg[2], procs[0], procs[1], meminfo["MemAvailable"], \
                  self.generator.activeJobCount(), ",".join(self.generator.activeJobNames())), \
                  file=self.loadstatsfile, flush=True)

        return (self.nextstats - time.time())

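    # Worked example of the interval alignment above (illustrative, with the
    # default 60-second interval): now=1007 -> now % 60 = 47, so
    # nextstats = int(1007 - 47) + 60 = 1020, i.e. samples land on round
    # interval boundaries. Note that the return value (seconds until the next
    # sample) is used by getCompletedJob() as the queue timeout, so stats are
    # still captured while the builder is waiting for jobs to finish.
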
    # Output progress info, and links to any relevant logs
    def displayJobStatus(self, job):
        self.cseq += 1
        print("[%0*d/%0*d] [%s] %-7s %s" %
              (self.twidth, self.cseq, self.twidth, self.jobtotal,
               self.colorise(job["status"]), job["task"], job["name"]), file=sys.stderr, flush=True)

        if job["failed"]:
            if job["logfile"]:
                print("\nThe following log for this failure is available:\n %s\n" % job["logfile"], \
                      file=sys.stderr, flush=True)

            if job["failedjobs"] and job["failedjobs"][0]["logfile"]:
                if len(job["failedjobs"]) == 1:
                    print("The following log from the failed dependency may be relevant:", file=sys.stderr)
                else:
                    print("The following logs from the failed dependencies may be relevant:", file=sys.stderr)
                for fjob in job["failedjobs"]:
                    print(" %-7s %s => %s" % (fjob["task"], fjob["name"], fjob["logfile"]), file=sys.stderr)
                print("", file=sys.stderr)
                sys.stderr.flush()

    # If configured, send output for a job (either a logfile, or captured stdout) to stdout
    def processJobOutput(self, job):
        log_processed = False
        log_size = 0
        log_start = time.time()

        if job["logfile"]:
            if self.log_combine == "always" or (job["failed"] and self.log_combine == "fail"):
                try:
                    with open(job["logfile"], "r", encoding="utf-8", errors="replace") as logfile:
                        for line in logfile:
                            print(line, end="")
                            if self.debug:
                                log_size += len(line)
                except UnicodeDecodeError:
                    print("\nPKGBUILDER ERROR: UnicodeDecodeError while reading log file %s\n" % job["logfile"], file=sys.stderr, flush=True)

                if job["failed"]:
                    print("\nThe following log for this failure is available:\n %s\n" % job["logfile"])

                sys.stdout.flush()
                log_processed = True

        elif job["cmdproc"]:
            if self.log_combine == "always" or (job["failed"] and self.log_combine == "fail"):
                for line in job["cmdproc"].stdout:
                    print(line, end="", file=sys.stdout)
                    if self.debug:
                        log_size += len(line)
                sys.stdout.flush()
                log_processed = True

        log_elapsed = time.time() - log_start

        if self.debug and log_processed:
            log_rate = int(log_size / log_elapsed) if log_elapsed != 0 else 0
            log_data = ", %s" % "/".join(job["logfile"].split("/")[-2:]) if job["logfile"] else ""
            DEBUG("WRITING LOG : {0:,} bytes in {1:0.3f} seconds ({2:,d} bytes/sec{3:})".format(log_size, log_elapsed, log_rate, log_data))

    # Log completion stats for job
    def writeJobLog(self, job):
        if self.joblogfile:
            print("{j[status]} {j[seq]:0{width}} {j[slot]} {j[task]} {j[name]} " \
                  "{j[utime]:.{prec}f} {j[stime]:.{prec}f} {j[cpu]} " \
                  "{j[elapsed]:.{prec}f} {j[start]:.{prec}f} {j[end]:.{prec}f} {0}" \
                  .format(job["logfile"] if job["logfile"] else "",
                          j=job, prec=4, width=self.twidth),
                  file=self.joblogfile, flush=True)

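    # An illustrative joblog line produced above (field values hypothetical):
    #   DONE 0042 01 build linux 123.4567 45.6789 98 170.1234 1600000000.0000 1600000170.1234 <THREAD_CONTROL>/logs/42.log
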
    def startProcesses(self):
        for process in self.processes:
            process.start()

    def stopProcesses(self):
        for process in self.processes:
            process.stop()

    def vprint(self, status, task, data, p1=None, p2=None):
        p1 = (self.threadcount - self.generator.activeJobCount()) if p1 == None else p1
        p2 = self.generator.activeJobCount() if p2 == None else p2
        print("[%0*d/%0*d] [%4s] %-7s %s" %
              (self.twidth, p1, self.twidth, p2,
               self.colorise(status), task, data), file=sys.stderr, flush=True)

    def colorise(self, item):
        if self.colors:
            return "%s%-4s\033[0m" % (self.color_code[item], item)
        return item

def DEBUG(msg):
    if DEBUG_LOG:
        print("%s: %s" % (datetime.datetime.now(), msg), file=DEBUG_LOG, flush=True)

parser = argparse.ArgumentParser(description="Run processes to build the specified JSON plan", \
                                 formatter_class=lambda prog: argparse.HelpFormatter(prog,max_help_position=25,width=90))

parser.add_argument("--max-procs", required=False, default="100%", \
                    help="Maximum number of processes to use. 0 is unlimited. Can be expressed as " \
                         "a percentage, for example 50%% (of $(nproc)). Default is 100%%.")

parser.add_argument("--plan", metavar="FILE", default="-", \
                    help="JSON formatted plan to be processed (default is to read from stdin).")

parser.add_argument("--joblog", metavar="FILE", default=None, \
                    help="File into which job completion information will be written.")

parser.add_argument("--loadstats", metavar="FILE", default=None, \
                    help="File into which load average and memory statistics will be written.")

parser.add_argument("--stats-interval", metavar="SECONDS", type=int, default=60, \
                    help="Sampling interval in seconds for --loadstats. Default is 60.")

group = parser.add_mutually_exclusive_group()
group.add_argument("--log-burst", action="store_true", default=True, \
                   help="Burst job output into individual log files. This is the default.")
group.add_argument("--no-log-burst", action="store_false", dest="log_burst", \
                   help="Disable --log-burst, job output is only written to stdout.")

group = parser.add_mutually_exclusive_group()
group.add_argument("--log-combine", choices=["always", "never", "fail"], default="always", \
                   help='Choose when to send job output to stdout. "fail" will send to stdout the ' \
                        'log of failed jobs only, while "never" will not send any logs to stdout. ' \
                        'Default is "always".')

group = parser.add_mutually_exclusive_group()
group.add_argument("--halt-on-error", action="store_true", default=True, \
                   help="Halt on first build failure. This is the default.")
group.add_argument("--continue-on-error", action="store_false", dest="halt_on_error", \
                   help="Disable --halt-on-error and continue building.")

parser.add_argument("--verbose", action="store_true", default=False, \
                    help="Output verbose information to stderr.")

parser.add_argument("--debug", action="store_true", default=False, \
                    help="Enable debug information.")

parser.add_argument("--colors", choices=["always", "never", "auto"], default="auto", \
                    help="Color code status (DONE, FAIL, etc) labels.")

args = parser.parse_args()

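# A typical invocation might look like this (hypothetical command line; per
# the --plan default above, the JSON plan is read from stdin unless --plan
# is given):
#   <plan generator> | <this script> --max-procs 50% --joblog joblog.txt --log-combine fail
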
#---------------------------

ROOT = os.environ.get("ROOT", os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
SCRIPTS = os.environ.get("SCRIPTS", "scripts")
THREAD_CONTROL = os.environ["THREAD_CONTROL"]

if args.debug:
    debug_log = "%s/debug.log" % THREAD_CONTROL
    DEBUG_LOG = open(debug_log, "w")
    print("Debug information is being written to: %s\n" % debug_log, file=sys.stderr, flush=True)
else:
    DEBUG_LOG = None

with open("%s/parallel.pid" % THREAD_CONTROL, "w") as pid:
    print("%d" % os.getpid(), file=pid)

try:
    result = Builder(args.max_procs, args.plan, args.joblog, args.loadstats, args.stats_interval, \
                     haltonerror=args.halt_on_error, \
                     log_burst=args.log_burst, log_combine=args.log_combine, \
                     colors=args.colors, debug=args.debug, verbose=args.verbose).build()

    if DEBUG_LOG:
        DEBUG_LOG.close()

    sys.exit(0 if result else 1)
except (KeyboardInterrupt, SystemExit) as e:
    if type(e) == SystemExit:
        sys.exit(int(str(e)))
    else:
        sys.exit(1)

55
scripts/pkgjson
Executable file
@ -0,0 +1,55 @@
#!/bin/bash

# SPDX-License-Identifier: GPL-2.0
# Copyright (C) 2019-present Team LibreELEC (https://libreelec.tv)

. config/options ""

# This function is passed a list of package.mk paths to be processed.
# Each package.mk is sourced with relevant variables output in JSON format.
json_worker() {
  local packages="$@"
  local pkgpath hierarchy exited

  # Shadow the exit builtin so that an exit inside a sourced package.mk only
  # flags the package (to be skipped) instead of terminating this worker.
  exit() { exited=1; }

  for pkgpath in ${packages}; do
    pkgpath="${pkgpath%%@*}"

    exited=0
    if ! source_package "${pkgpath}/package.mk" &>/dev/null; then
      unset -f exit
      die "$(print_color CLR_ERROR "FAILURE: sourcing package ${pkgpath}/package.mk")"
    fi

    [ ${exited} -eq 1 ] && continue

    [[ ${pkgpath} =~ ^${ROOT}/${PACKAGES}/ ]] && hierarchy="global" || hierarchy="local"

    cat <<EOF
{
  "name": "${PKG_NAME}",
  "hierarchy": "${hierarchy}",
  "section": "${PKG_SECTION}",
  "bootstrap": "${PKG_DEPENDS_BOOTSTRAP}",
  "init": "${PKG_DEPENDS_INIT}",
  "host": "${PKG_DEPENDS_HOST}",
  "target": "${PKG_DEPENDS_TARGET}"
},
EOF
  done
  unset -f exit
}

if [ "$1" = "--worker" ]; then
  shift
  json_worker "$*"
  exit $?
fi

# pipefail: return value of a pipeline is the value of the last (rightmost) command to exit with a non-zero status
set -o pipefail

cat ${_CACHE_PACKAGE_GLOBAL} ${_CACHE_PACKAGE_LOCAL} | \
  xargs --max-args=64 --max-procs="$(nproc)" "$0" --worker

exit $?
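
Each worker emits one JSON object per package, terminated by a comma, so the
concatenated worker output is not itself valid JSON: the consumer has to wrap
the stream in brackets and drop the final comma. A minimal sketch of that
step in Python (illustrative only; the real consumer is
scripts/genbuildplan.py):

    import json, sys

    raw = sys.stdin.read().strip()
    # Wrap the comma-separated objects in a JSON array, dropping the trailing comma.
    packages = json.loads("[%s]" % raw.rstrip(","))
    for pkg in packages:
        print(pkg["name"], pkg["section"])
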
@ -49,6 +49,12 @@ class HistoryEvent:
             return(value)
         return default
 
+def calc_pct(a, b):
+    if b > 0.0:
+        return (a / b) * 100
+    else:
+        return 0.0
+
 def pct_brackets(pct):
     spct = "%04.1f" % pct
     if float(spct) >= 100.0:
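
With this change the Pct columns are computed against the cumulative busy
time of all slots rather than wall clock time multiplied by the slot count;
for example, calc_pct(50.0, 200.0) returns 25.0, and calc_pct(50.0, 0)
safely returns 0.0 for an empty or incomplete log.
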
@ -151,7 +157,10 @@ for event in events:
 for slot in slots:
     for status in slots[slot]["statuses"]:
         if status == "IDLE":
-            slots[slot]["statuses"][status]["total"] += (last_active - slots[slot]["statuses"][status]["start"])
+            if slots[slot]["statuses"]["FAILED"]["enabled"] == True:
+                slots[slot]["statuses"][status]["total"] += (last_active - slots[slot]["statuses"]["FAILED"]["start"])
+            else:
+                slots[slot]["statuses"][status]["total"] += (last_active - slots[slot]["statuses"][status]["start"])
         elif slots[slot]["statuses"][status]["enabled"] == True:
             if status != "FAILED":
                 incomplete = True
@ -161,7 +170,7 @@ for slot in slots:
 
 # Summarise slot data by various criteria
 summary = {}
-cumaltive_total = 0
+cumulative_count = cumulative_total = 0
 for slot in slots:
     acount = atotal = 0
     scount = stotal = 0
@ -180,7 +189,8 @@ for slot in slots:
     for status in slots[slot]["statuses"]:
         ccount += slots[slot]["statuses"][status]["count"]
         ctotal += slots[slot]["statuses"][status]["total"]
-    cumaltive_total += ctotal
+    cumulative_count += ccount
+    cumulative_total += ctotal
 
     summary[slot] = {"busy": {"count": acount, "total": atotal},
                      "stalled": {"count": scount, "total": stotal},
@ -195,12 +205,12 @@ for slot in summary:
 elapsed = (ended - started)
 
 print("Total Build Time: %s (wall clock)" % secs_to_hms(elapsed, blankzero=False))
-print("Accum Build Time: %s (%d slots)\n" % (secs_to_hms(cumaltive_total, blankzero=False), len(slots)))
+print("Accum Build Time: %s (%d slots)\n" % (secs_to_hms(cumulative_total, blankzero=False), len(slots)))
 
 if incomplete:
     print("*** WARNING: active slots detected - build may be in progress/incomplete ***\n")
 
-cum_total = cum_count = cum_pct = 0
+cum_total = 0.0
 print("Breakdown by status (all slots):\n")
 print(" Status Usage ( Pct ) Count State")
 for status in sorted(ALL_STATUSES):
@ -211,8 +221,8 @@ for status in sorted(ALL_STATUSES):
         count += slots[slot]["statuses"][status]["count"]
         total += slots[slot]["statuses"][status]["total"]
 
-    pct = (100 * total / elapsed / len(slots)) if elapsed > 0.0 else 0.0
-    cum_pct += pct
+    pct = calc_pct(total, cumulative_total)
+    cum_total += total
 
     if status in BUSY_STATUSES:
         stype = "busy"
@ -221,10 +231,9 @@ for status in sorted(ALL_STATUSES):
     else:
         stype = ""
     print(" %-7s %12s %-7s %-5d %-5s" % (status, secs_to_hms(total, blankzero=True), pct_brackets(pct), count, stype))
-    cum_count += count
-    cum_total += total
 print(" -------------------------------------")
-print(" %-7s %12s %-7s %-5d" % ("TOTAL", secs_to_hms(cum_total, blankzero=True), pct_brackets(cum_pct), cum_count))
+print(" %-7s %12s %-7s %-5d" % ("TOTAL", secs_to_hms(cumulative_total, blankzero=True), \
+      pct_brackets(calc_pct(cum_total, cumulative_total)), cumulative_count))
 print("")
 
 print("Peak concurrency: %d out of %d slots\n" % (peak, len(slots)))
@ -238,30 +247,25 @@ print("#Rank Slot Usage ( Pct ) | # of Slots Usage ( Pct
 lines = []
 
 busy_total = 0
-busy_pct = 0
 for rank, slot in enumerate(sorted(summary, key=get_busy_total, reverse=True)):
-    pct = (100 * summary[slot]["busy"]["total"] / elapsed / len(slots)) if elapsed > 0.0 else 0.0
+    pct = calc_pct(summary[slot]["busy"]["total"], cumulative_total)
     state = "active" if slots[slot]["isactive"] == True else " "
     stime = secs_to_hms(summary[slot]["busy"]["total"], blankzero=True)
     busy_total += summary[slot]["busy"]["total"]
-    busy_pct += pct
     lines.append("%s %s %-7s %6s |" % (slot, stime, pct_brackets(pct), state))
 
 concurrent_total = 0
-concurrent_pct = 0.0
 for rank, concurrentn in enumerate(sorted(concurrency, key=get_concurrent_val, reverse=True)):
     concurrent = concurrency[concurrentn]
-    pct = (100 * concurrent["total"] / elapsed / len(slots)) if elapsed > 0.0 else 0.0
+    pct = calc_pct(concurrent["total"], cumulative_total)
     stime = secs_to_hms(concurrent["total"], blankzero=True)
     concurrent_total += concurrent["total"]
-    concurrent_pct += pct
     lines[rank] += " %02d %s %-7s" % (concurrentn, stime, pct_brackets(pct))
 
 for rank, line in enumerate(lines):
     print(" #%02d %s" % (rank + 1, line))
 
-bpct = spct = "%04.1f" % pct
 print("-----------------------------------------+---------------------------------")
 print(" TOTALS %s %-7s %s %-7s" %
-      (secs_to_hms(busy_total, blankzero=True), pct_brackets(busy_pct),
-       secs_to_hms(concurrent_total, blankzero=True), pct_brackets(concurrent_pct)))
+      (secs_to_hms(busy_total, blankzero=True), pct_brackets(calc_pct(busy_total, cumulative_total)),
+       secs_to_hms(concurrent_total, blankzero=True), pct_brackets(calc_pct(concurrent_total, cumulative_total))))
@ -6,24 +6,6 @@
 unset _CACHE_PACKAGE_LOCAL _CACHE_PACKAGE_GLOBAL _DEBUG_DEPENDS_LIST _DEBUG_PACKAGE_LIST
 
 . config/options ""
-. config/multithread
 
-# Fake the parallel command if GNU parallel is not available - slow, but works.
-fake_parallel() {
-  while read -r line; do
-    json_worker "${line}"
-  done
-}
-
-PARALLEL_BIN=${TOOLCHAIN}/bin/parallel
-[ -x ${PARALLEL_BIN} ] || PARALLEL_BIN=parallel
-command -v ${PARALLEL_BIN} >/dev/null || PARALLEL_BIN=fake_parallel
-
-# pipefail: return value of a pipeline is the value of the last (rightmost) command to exit with a non-zero status
-set -o pipefail
-
-cat ${_CACHE_PACKAGE_GLOBAL} ${_CACHE_PACKAGE_LOCAL} | \
-  ${PARALLEL_BIN} --plain --no-notice --max-args 30 --halt now,fail=1 json_worker | \
-  ${SCRIPTS}/genbuildplan.py --no-reorder --show-wants --build ${@:-image} --warn-invalid ${GENFLAGS} || \
+${SCRIPTS}/pkgjson | ${SCRIPTS}/genbuildplan.py --show-wants --build ${@:-image} --warn-invalid ${GENFLAGS} || \
   die "FAILURE: Unable to generate plan"