Format init actions scripts with IntelliJ formatter (#686)
medb authored Dec 11, 2019
1 parent d74ac51 commit 1ad01a0
Showing 36 changed files with 488 additions and 511 deletions.
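The change is mechanical: every init action script is re-run through the same shell formatter, so the diffs below alter layout only, not behavior. The main conventions applied: `; then` attaches directly to the closing bracket; `|` and `||` move to the end of the continued line instead of the start of the next; redirections lose the space after the operator (`cat >file`, `>>file`, `<<EOF`); backticks become `$(...)`; and spacing inside `((...))` arithmetic is normalized. A minimal before/after sketch of the style (illustrative command and file names, not taken from the diff):

Before:
  if [[ "$role" == 'Master' ]] ; then
    some_command \
      | grep 'pattern' \
      | head -n1
    cat > "/tmp/example.conf" <<EOF
key=value
EOF
  fi

After:
  if [[ "$role" == 'Master' ]]; then
    some_command |
      grep 'pattern' |
      head -n1
    cat >"/tmp/example.conf" <<EOF
key=value
EOF
  fi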
47 changes: 23 additions & 24 deletions beam/beam.sh
@@ -18,23 +18,22 @@ readonly BEAM_IMAGE_VERSION_DEFAULT="master"
 readonly BEAM_IMAGE_REPOSITORY_KEY="beam-image-repository"
 readonly BEAM_IMAGE_REPOSITORY_DEFAULT="apache.bintray.io/beam"
 
-
 readonly START_FLINK_YARN_SESSION_METADATA_KEY='flink-start-yarn-session'
 # Set this to true to start a flink yarn session at initialization time.
 readonly START_FLINK_YARN_SESSION_DEFAULT=true
 
 function is_master() {
   local role="$(/usr/share/google/get_metadata_value attributes/dataproc-role)"
-  if [[ "$role" == 'Master' ]] ; then
+  if [[ "$role" == 'Master' ]]; then
     true
   else
     false
   fi
 }
 
 function get_artifacts_dir() {
-  /usr/share/google/get_metadata_value "attributes/${ARTIFACTS_GCS_PATH_METADATA_KEY}" \
-    || echo "gs://$(/usr/share/google/get_metadata_value "attributes/dataproc-bucket")/beam-artifacts"
+  /usr/share/google/get_metadata_value "attributes/${ARTIFACTS_GCS_PATH_METADATA_KEY}" ||
+    echo "gs://$(/usr/share/google/get_metadata_value "attributes/dataproc-bucket")/beam-artifacts"
 }
 
 function download_snapshot() {
@@ -49,17 +48,17 @@ function download_snapshot() {
 
 function flink_master_url() {
   local start_flink_yarn_session="$(/usr/share/google/get_metadata_value \
-    "attributes/${START_FLINK_YARN_SESSION_METADATA_KEY}" \
-    || echo "${START_FLINK_YARN_SESSION_DEFAULT}")"
+    "attributes/${START_FLINK_YARN_SESSION_METADATA_KEY}" ||
+    echo "${START_FLINK_YARN_SESSION_DEFAULT}")"
   # TODO: delete this workaround when the beam job service is able to understand
   # flink in yarn mode.
-  if ${start_flink_yarn_session} ; then
+  if ${start_flink_yarn_session}; then
     # grab final field from the first yarn application that contains 'flink'
-    yarn application -list \
-      | grep -i 'flink' \
-      | head -n1 \
-      | awk -F $'\t' '{print $9}' \
-      | cut -c8-
+    yarn application -list |
+      grep -i 'flink' |
+      head -n1 |
+      awk -F $'\t' '{print $9}' |
+      cut -c8-
   else
     echo "localhost:8081"
   fi
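Note on the pipeline above: `yarn application -list` prints one tab-separated row per application with the tracking URL in the ninth field, and `cut -c8-` strips the 7-character `http://` prefix to leave `host:port`. A runnable sketch against a fabricated row (every value below is made up for illustration):

  printf 'application_1\tflink\tApache Flink\troot\tdefault\tRUNNING\tUNDEFINED\t100%%\thttp://node-1:43251\n' |
    awk -F $'\t' '{print $9}' |
    cut -c8-
  # prints: node-1:43251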
@@ -69,8 +68,8 @@ function install_job_service() {
   local master_url="$(/usr/share/google/get_metadata_value attributes/dataproc-master)"
   local artifacts_dir="$(get_artifacts_dir)"
   local release_snapshot_url="$(/usr/share/google/get_metadata_value \
-    "attributes/${RELEASE_SNAPSHOT_URL_METADATA_KEY}" \
-    || echo "${RELEASE_SNAPSHOT_URL_DEFAULT}")"
+    "attributes/${RELEASE_SNAPSHOT_URL_METADATA_KEY}" ||
+    echo "${RELEASE_SNAPSHOT_URL_DEFAULT}")"
 
   echo "Retrieving Beam Job Service snapshot from ${release_snapshot_url}"
 
@@ -84,7 +83,7 @@ function install_job_service() {
   mkdir -p "${SERVICE_WORKING_DIR}"
   chown -R "${SERVICE_WORKING_USER}" "${SERVICE_WORKING_DIR}"
 
-  cat > "/etc/systemd/system/beam-job-service.service" <<EOF
+  cat >"/etc/systemd/system/beam-job-service.service" <<EOF
 [Unit]
 Description=Beam Job Service
 After=default.target
@@ -103,7 +102,7 @@ Restart=on-failure
 [Install]
 WantedBy=multi-user.target
 EOF
-systemctl enable beam-job-service
+  systemctl enable beam-job-service
 }
 
 function run_job_service() {
@@ -112,11 +111,11 @@ function run_job_service() {
 
 function pull_beam_images() {
   local beam_image_version="$(/usr/share/google/get_metadata_value \
-    "attributes/${BEAM_IMAGE_VERSION_METADATA_KEY}" \
-    || echo "${BEAM_IMAGE_VERSION_DEFAULT}")"
+    "attributes/${BEAM_IMAGE_VERSION_METADATA_KEY}" ||
+    echo "${BEAM_IMAGE_VERSION_DEFAULT}")"
   local image_repo="$(/usr/share/google/get_metadata_value \
-    "attributes/${BEAM_IMAGE_REPOSITORY_KEY}" \
-    || echo "${BEAM_IMAGE_REPOSITORY_DEFAULT}")"
+    "attributes/${BEAM_IMAGE_REPOSITORY_KEY}" ||
+    echo "${BEAM_IMAGE_REPOSITORY_DEFAULT}")"
   # Pull beam images with `sudo -i` since if pulling from GCR, yarn will be
   # configured with GCR authorization
   sudo -u yarn -i docker pull "${image_repo}/go:${beam_image_version}"
@@ -131,9 +130,9 @@ function main() {
   fi
 
   local pull_images="$(/usr/share/google/get_metadata_value \
-    "attributes/${BEAM_IMAGE_ENABLE_PULL_METADATA_KEY}" \
-    || echo "${BEAM_IMAGE_ENABLE_PULL_DEFAULT}")"
-  if ${pull_images} ; then
+    "attributes/${BEAM_IMAGE_ENABLE_PULL_METADATA_KEY}" ||
+    echo "${BEAM_IMAGE_ENABLE_PULL_DEFAULT}")"
+  if ${pull_images}; then
     pull_beam_images
   fi
 }
12 changes: 6 additions & 6 deletions beam/util/build-beam-artifacts.sh
@@ -2,7 +2,7 @@
 
 set -exuo pipefail
 
-if [ "$#" -lt 2 ] ; then
+if [ "$#" -lt 2 ]; then
   echo "Usage: $0 <BEAM_JOB_SERVICE_DESTINATION> <BEAM_CONTAINER_IMAGE_DESTINATION> [<BEAM_SOURCE_VERSION> [<BEAM_SOURCE_DIRECTORY>]]" >&2
   exit 1
 fi
@@ -20,18 +20,18 @@ function build_job_service() {
 
 function build_container() {
   ./gradlew docker
-  local images=($(docker images \
-    | grep '.*-docker-apache' \
-    | awk '{print $1}'))
-  for image in ${images} ; do
+  local images=($(docker images |
+    grep '.*-docker-apache' |
+    awk '{print $1}'))
+  for image in ${images}; do
     local image_destination="${BEAM_CONTAINER_IMAGE_DESTINATION}/$(basename ${image}):${BEAM_SOURCE_VERSION}"
     docker tag $image:latest ${image_destination}
     docker push ${image_destination}
   done
 }
 
 function main() {
-  if [[ $# -eq 4 ]] ; then
+  if [[ $# -eq 4 ]]; then
     # if there is a 4th argument, use it as the beam source directory
     pushd "$4"
   else
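One behavior worth noting that the formatter deliberately leaves alone: in `for image in ${images}; do`, the unquoted `${images}` expands to only the first element of the array (it is equivalent to `${images[0]}`); iterating every match would require `"${images[@]}"`. A minimal illustration with hypothetical values:

  imgs=(alpha beta gamma)
  for i in ${imgs}; do echo "$i"; done        # prints: alpha
  for i in "${imgs[@]}"; do echo "$i"; done   # prints: alpha beta gamma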
20 changes: 10 additions & 10 deletions bigdl/bigdl.sh
@@ -17,10 +17,10 @@ cd /opt/intel-bigdl
 wget -nv --timeout=30 --tries=5 --retry-connrefused "${BIGDL_DOWNLOAD_URL}"
 unzip *.zip
 
-JAR=`realpath lib/*.jar`
-PYTHON_ZIP=`realpath lib/*.zip`
+JAR=$(realpath lib/*.jar)
+PYTHON_ZIP=$(realpath lib/*.zip)
 
-cat << EOF >> /etc/spark/conf/spark-env.sh
+cat <<EOF >>/etc/spark/conf/spark-env.sh
 SPARK_DIST_CLASSPATH="\$SPARK_DIST_CLASSPATH:$JAR"
 PYTHONPATH="\$PYTHONPATH:$PYTHON_ZIP"
 EOF
@@ -30,20 +30,20 @@ EOF
 
 if [[ "${ROLE}" == "Master" ]]; then
   NUM_NODEMANAGERS_TARGET="${WORKER_COUNT}"
-  if (( "${WORKER_COUNT}" == 0 )); then
+  if (("${WORKER_COUNT}" == 0)); then
     # Single node clusters have one node manager
     NUM_NODEMANAGERS_TARGET=1
   fi
   # Wait for 5 minutes for Node Managers to register and run.
   # Break early if the expected number of node managers have registered.
-  for (( i=0; i < 5*60; i++ )); do
+  for ((i = 0; i < 5 * 60; i++)); do
     CURRENTLY_RUNNING_NODEMANAGERS=$(yarn node -list | grep RUNNING | wc -l)
-    if (( CURRENTLY_RUNNING_NODEMANAGERS == NUM_NODEMANAGERS_TARGET )); then
+    if ((CURRENTLY_RUNNING_NODEMANAGERS == NUM_NODEMANAGERS_TARGET)); then
       break
     fi
     sleep 1
   done
-  if (( CURRENTLY_RUNNING_NODEMANAGERS == 0 )); then
+  if ((CURRENTLY_RUNNING_NODEMANAGERS == 0)); then
    echo "No node managers running. Cluster creation likely failed"
     exit 1
   fi
@@ -73,14 +73,14 @@ if [[ "${ROLE}" == "Master" ]]; then
 
   # Check if it BigDL conf or Zoo
   if [ -f conf/spark-bigdl.conf ]; then
-    cat conf/spark-bigdl.conf >> /etc/spark/conf/spark-defaults.conf
+    cat conf/spark-bigdl.conf >>/etc/spark/conf/spark-defaults.conf
   elif [ -f conf/spark-analytics-zoo.conf ]; then
-    cat conf/spark-analytics-zoo.conf >> /etc/spark/conf/spark-defaults.conf
+    cat conf/spark-analytics-zoo.conf >>/etc/spark/conf/spark-defaults.conf
   else
     err "Can't find any suitable spark config for Intel BigDL/Zoo"
   fi
 
-  cat << EOF >> /etc/spark/conf/spark-defaults.conf
+  cat <<EOF >>/etc/spark/conf/spark-defaults.conf
 spark.dynamicAllocation.enabled=false
 spark.executor.instances=${SPARK_NUM_EXECUTORS}
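In the spark-env.sh and spark-defaults.conf heredocs above, `\$SPARK_DIST_CLASSPATH` and `\$PYTHONPATH` are escaped so they expand later, when Spark sources the file, while `$JAR` and `${SPARK_NUM_EXECUTORS}` expand immediately when the init action runs. A sketch of the same pattern with an illustrative path:

  JAR=/opt/example/lib/bigdl.jar  # expanded now, by the init action
  cat <<EOF >>/tmp/example-env.sh
SPARK_DIST_CLASSPATH="\$SPARK_DIST_CLASSPATH:$JAR"
EOF
  # /tmp/example-env.sh now ends with:
  # SPARK_DIST_CLASSPATH="$SPARK_DIST_CLASSPATH:/opt/example/lib/bigdl.jar"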
(33 more changed files not shown.)
