Back to home page

OSCL-LXR

 
 

    


0001 #!/usr/bin/env bash
0002 
0003 #
0004 # Licensed to the Apache Software Foundation (ASF) under one or more
0005 # contributor license agreements.  See the NOTICE file distributed with
0006 # this work for additional information regarding copyright ownership.
0007 # The ASF licenses this file to You under the Apache License, Version 2.0
0008 # (the "License"); you may not use this file except in compliance with
0009 # the License.  You may obtain a copy of the License at
0010 #
0011 #    http://www.apache.org/licenses/LICENSE-2.0
0012 #
0013 # Unless required by applicable law or agreed to in writing, software
0014 # distributed under the License is distributed on an "AS IS" BASIS,
0015 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
0016 # See the License for the specific language governing permissions and
0017 # limitations under the License.
0018 #
0019 
0020 #
0021 # Creates a Spark release candidate. The script will update versions, tag the branch,
0022 # build Spark binary packages and documentation, and upload maven artifacts to a staging
0023 # repository. There is also a dry run mode where only local builds are performed, and
0024 # nothing is uploaded to the ASF repos.
0025 #
0026 # Run with "-h" for options.
0027 #
0028 
0029 set -e
0030 SELF=$(cd $(dirname $0) && pwd)
0031 . "$SELF/release-util.sh"
0032 
0033 function usage {
0034   local NAME=$(basename $0)
0035   cat <<EOF
0036 Usage: $NAME [options]
0037 
0038 This script runs the release scripts inside a docker image. The image is hardcoded to be called
0039 "spark-rm" and will be re-generated (as needed) on every invocation of this script.
0040 
0041 Options are:
0042 
0043   -d [path]   : required: working directory (output will be written to an "output" directory in
0044                 the working directory).
0045   -n          : dry run mode. Performs checks and local builds, but do not upload anything.
0046   -t [tag]    : tag for the spark-rm docker image to use for building (default: "latest").
0047   -j [path]   : path to local JDK installation to use for building. By default the script will
0048                 use openjdk8 installed in the docker image.
0049   -s [step]   : runs a single step of the process; valid steps are: tag, build, docs, publish
0050 EOF
0051 }
0052 
0053 WORKDIR=
0054 IMGTAG=latest
0055 JAVA=
0056 RELEASE_STEP=
0057 while getopts ":d:hj:ns:t:" opt; do
0058   case $opt in
0059     d) WORKDIR="$OPTARG" ;;
0060     n) DRY_RUN=1 ;;
0061     t) IMGTAG="$OPTARG" ;;
0062     j) JAVA="$OPTARG" ;;
0063     s) RELEASE_STEP="$OPTARG" ;;
0064     h) usage ;;
0065     \?) error "Invalid option. Run with -h for help." ;;
0066   esac
0067 done
0068 
0069 if [ -z "$WORKDIR" ] || [ ! -d "$WORKDIR" ]; then
0070   error "Work directory (-d) must be defined and exist. Run with -h for help."
0071 fi
0072 
0073 if [ -d "$WORKDIR/output" ]; then
0074   read -p "Output directory already exists. Overwrite and continue? [y/n] " ANSWER
0075   if [ "$ANSWER" != "y" ]; then
0076     error "Exiting."
0077   fi
0078 fi
0079 
0080 cd "$WORKDIR"
0081 rm -rf "$WORKDIR/output"
0082 mkdir "$WORKDIR/output"
0083 
0084 get_release_info
0085 
0086 # Place all RM scripts and necessary data in a local directory that must be defined in the command
0087 # line. This directory is mounted into the image.
0088 for f in "$SELF"/*; do
0089   if [ -f "$f" ]; then
0090     cp "$f" "$WORKDIR"
0091   fi
0092 done
0093 
0094 GPG_KEY_FILE="$WORKDIR/gpg.key"
0095 fcreate_secure "$GPG_KEY_FILE"
0096 $GPG --export-secret-key --armor --pinentry-mode loopback --passphrase "$GPG_PASSPHRASE" "$GPG_KEY" > "$GPG_KEY_FILE"
0097 
0098 run_silent "Building spark-rm image with tag $IMGTAG..." "docker-build.log" \
0099   docker build -t "spark-rm:$IMGTAG" --build-arg UID=$UID "$SELF/spark-rm"
0100 
0101 # Write the release information to a file with environment variables to be used when running the
0102 # image.
0103 ENVFILE="$WORKDIR/env.list"
0104 fcreate_secure "$ENVFILE"
0105 
0106 function cleanup {
0107   rm -f "$ENVFILE"
0108   rm -f "$GPG_KEY_FILE"
0109 }
0110 
0111 trap cleanup EXIT
0112 
0113 cat > $ENVFILE <<EOF
0114 DRY_RUN=$DRY_RUN
0115 SKIP_TAG=$SKIP_TAG
0116 RUNNING_IN_DOCKER=1
0117 GIT_BRANCH=$GIT_BRANCH
0118 NEXT_VERSION=$NEXT_VERSION
0119 RELEASE_VERSION=$RELEASE_VERSION
0120 RELEASE_TAG=$RELEASE_TAG
0121 GIT_REF=$GIT_REF
0122 SPARK_PACKAGE_VERSION=$SPARK_PACKAGE_VERSION
0123 ASF_USERNAME=$ASF_USERNAME
0124 GIT_NAME=$GIT_NAME
0125 GIT_EMAIL=$GIT_EMAIL
0126 GPG_KEY=$GPG_KEY
0127 ASF_PASSWORD=$ASF_PASSWORD
0128 GPG_PASSPHRASE=$GPG_PASSPHRASE
0129 RELEASE_STEP=$RELEASE_STEP
0130 USER=$USER
0131 ZINC_OPTS=${RELEASE_ZINC_OPTS:-"-Xmx4g -XX:ReservedCodeCacheSize=2g"}
0132 EOF
0133 
0134 JAVA_VOL=
0135 if [ -n "$JAVA" ]; then
0136   echo "JAVA_HOME=/opt/spark-java" >> $ENVFILE
0137   JAVA_VOL="--volume $JAVA:/opt/spark-java"
0138 fi
0139 
0140 echo "Building $RELEASE_TAG; output will be at $WORKDIR/output"
0141 docker run -ti \
0142   --env-file "$ENVFILE" \
0143   --volume "$WORKDIR:/opt/spark-rm" \
0144   $JAVA_VOL \
0145   "spark-rm:$IMGTAG"