Commit 1e7c044

add support for spark 3.1.1(emr 6.3.0) (#61)
1 parent 04b5676 commit 1e7c044

File tree: 13 files changed (+357 −5 lines)

Makefile

Lines changed: 4 additions & 3 deletions
@@ -32,6 +32,7 @@ all: build test
 
 init:
 	pip install pipenv --upgrade
+	pipenv run pip install --upgrade pip
 	pipenv install
 	cp {Pipfile,Pipfile.lock,setup.py} ${BUILD_CONTEXT}
 
@@ -82,7 +83,7 @@ test-local: install-container-library build-tests
 test-sagemaker: build-tests
 	# Separate `pytest` invocation without parallelization:
 	# History server tests can't run in parallel since they use the same container name.
-	pipenv run pytest -s -vv test/integration/history \
+	pipenv run pytest --reruns 3 -s -vv test/integration/history \
 	--repo=$(DEST_REPO) --tag=$(VERSION) --durations=0 \
 	--spark-version=$(SPARK_VERSION) \
 	--framework-version=$(FRAMEWORK_VERSION) \
@@ -91,7 +92,7 @@ test-sagemaker: build-tests
 	--region ${REGION} \
 	--domain ${AWS_DOMAIN}
 	# OBJC_DISABLE_INITIALIZE_FORK_SAFETY: https://github.com/ansible/ansible/issues/32499#issuecomment-341578864
-	OBJC_DISABLE_INITIALIZE_FORK_SAFETY=YES pipenv run pytest --workers auto -s -vv test/integration/sagemaker \
+	OBJC_DISABLE_INITIALIZE_FORK_SAFETY=YES pipenv run pytest --workers auto --reruns 3 -s -vv test/integration/sagemaker \
 	--repo=$(DEST_REPO) --tag=$(VERSION) --durations=0 \
 	--spark-version=$(SPARK_VERSION) \
 	--framework-version=$(FRAMEWORK_VERSION) \
@@ -146,4 +147,4 @@ release:
 
 
 # Targets that don't create a file with the same name as the target.
-.PHONY: all build test test-all clean clean-all release whitelist build-container-library
+.PHONY: all build test test-all clean clean-all release whitelist build-container-library

Pipfile

Lines changed: 1 addition & 0 deletions
@@ -31,6 +31,7 @@ sagemaker = "==2.30.0"
 smspark = {editable = true, path = "."}
 importlib-metadata = "==3.7.3"
 pytest-parallel = "==0.1.0"
+pytest-rerunfailures = "10.0"
 
 [requires]
 python_version = "3.7"
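The `--reruns 3` flag added to the Makefile comes from the pytest-rerunfailures plugin pinned in the Pipfile above; it retries failed integration tests a few times before reporting them as failures. The same behaviour can also be requested per test via the plugin's flaky marker. A minimal sketch, assuming a hypothetical test module (the real suites live under test/integration/):

    import pytest

    # Retry this test up to 3 times, waiting 5 seconds between attempts.
    # Equivalent to `pytest --reruns 3 --reruns-delay 5 ...` on the command line,
    # which is what the Makefile targets above now do (without the delay).
    @pytest.mark.flaky(reruns=3, reruns_delay=5)
    def test_history_server_starts():
        # Placeholder assertion standing in for a real history-server check.
        assert True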

new_images.yml

Lines changed: 2 additions & 2 deletions
@@ -1,7 +1,7 @@
 ---
 new_images:
-  - spark: "3.0.0"
+  - spark: "3.1.1"
     use-case: "processing"
     processors: ["cpu"]
     python: ["py37"]
-    sm_version: "1.3"
+    sm_version: "1.0"
Lines changed: 1 addition & 0 deletions
@@ -0,0 +1 @@
+echo "Not implemented"
Lines changed: 109 additions & 0 deletions
@@ -0,0 +1,109 @@
+FROM 137112412989.dkr.ecr.us-west-2.amazonaws.com/amazonlinux:2
+ARG REGION
+ENV AWS_REGION ${REGION}
+RUN yum clean all
+RUN yum update -y
+RUN yum install -y awscli bigtop-utils curl gcc gzip unzip python3 python3-setuptools python3-pip python-devel python3-devel python-psutil gunzip tar wget liblapack* libblas* libopencv* libopenblas*
+
+# install nginx amazonlinux:2.0.20200304.0 does not have nginx, so need to install epel-release first
+RUN wget https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
+RUN yum install -y epel-release-latest-7.noarch.rpm
+RUN yum install -y nginx
+
+RUN rm -rf /var/cache/yum
+
+ENV PYTHONDONTWRITEBYTECODE=1
+ENV PYTHONUNBUFFERED=1
+# http://blog.stuart.axelbrooke.com/python-3-on-spark-return-of-the-pythonhashseed
+ENV PYTHONHASHSEED 0
+ENV PYTHONIOENCODING UTF-8
+ENV PIP_DISABLE_PIP_VERSION_CHECK 1
+
+# Install EMR Spark/Hadoop
+ENV HADOOP_HOME /usr/lib/hadoop
+ENV HADOOP_CONF_DIR /usr/lib/hadoop/etc/hadoop
+ENV SPARK_HOME /usr/lib/spark
+
+COPY yum/emr-apps.repo /etc/yum.repos.d/emr-apps.repo
+
+# Install hadoop / spark dependencies from EMR's yum repository for Spark optimizations.
+# replace placeholder with region in repository URL
+RUN sed -i "s/REGION/${AWS_REGION}/g" /etc/yum.repos.d/emr-apps.repo
+RUN adduser -N hadoop
+
+# These packages are a subset of what EMR installs in a cluster with the
+# "hadoop", "spark", and "hive" applications.
+# They include EMR-optimized libraries and extras.
+RUN yum install -y aws-hm-client \
+    aws-java-sdk \
+    aws-sagemaker-spark-sdk \
+    emr-goodies \
+    emr-ruby \
+    emr-scripts \
+    emr-s3-select \
+    emrfs \
+    hadoop \
+    hadoop-client \
+    hadoop-hdfs \
+    hadoop-hdfs-datanode \
+    hadoop-hdfs-namenode \
+    hadoop-httpfs \
+    hadoop-kms \
+    hadoop-lzo \
+    hadoop-yarn \
+    hadoop-yarn-nodemanager \
+    hadoop-yarn-proxyserver \
+    hadoop-yarn-resourcemanager \
+    hadoop-yarn-timelineserver \
+    hive \
+    hive-hcatalog \
+    hive-hcatalog-server \
+    hive-jdbc \
+    hive-server2 \
+    python37-numpy \
+    s3-dist-cp \
+    spark-core \
+    spark-datanucleus \
+    spark-external \
+    spark-history-server \
+    spark-python
+
+
+# Point Spark at proper python binary
+ENV PYSPARK_PYTHON=/usr/bin/python3
+
+# Setup Spark/Yarn/HDFS user as root
+ENV PATH="/usr/bin:/opt/program:${PATH}"
+ENV YARN_RESOURCEMANAGER_USER="root"
+ENV YARN_NODEMANAGER_USER="root"
+ENV HDFS_NAMENODE_USER="root"
+ENV HDFS_DATANODE_USER="root"
+ENV HDFS_SECONDARYNAMENODE_USER="root"
+
+# Set up bootstrapping program and Spark configuration
+COPY *.whl /opt/program/
+RUN /usr/bin/python3 -m pip install pipenv --upgrade
+COPY hadoop-config /opt/hadoop-config
+COPY nginx-config /opt/nginx-config
+COPY aws-config /opt/aws-config
+COPY Pipfile /opt/program/
+COPY Pipfile.lock /opt/program/
+COPY setup.py /opt/program/
+ENV PIPENV_PIPFILE=/opt/program/Pipfile
+# Use --system flag, so it will install all packages into the system python,
+# and not into the virtualenv. Since docker containers do not need to have virtualenvs
+RUN pipenv install --system
+RUN /usr/bin/python3 -m pip install /opt/program/*.whl
+
+# Setup container bootstrapper
+COPY container-bootstrap-config /opt/container-bootstrap-config
+RUN chmod +x /opt/container-bootstrap-config/bootstrap.sh
+RUN /opt/container-bootstrap-config/bootstrap.sh
+
+# With this config, spark history server will not run as daemon, otherwise there
+# will be no server running and container will terminate immediately
+ENV SPARK_NO_DAEMONIZE TRUE
+
+WORKDIR $SPARK_HOME
+
+ENTRYPOINT ["smspark-submit"]
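For context, images built from this Dockerfile are normally consumed through the SageMaker Python SDK rather than run directly; the Pipfile pins sagemaker==2.30.0, which ships PySparkProcessor. A minimal sketch of launching a processing job against the new Spark 3.1 container — the role ARN, bucket, and script path are placeholders, and the exact framework_version-to-image mapping depends on the SDK's image URI configuration:

    from sagemaker.spark.processing import PySparkProcessor

    # Placeholder values; substitute a real execution role, bucket, and script.
    role = "arn:aws:iam::111122223333:role/SageMakerRole"

    processor = PySparkProcessor(
        base_job_name="sm-spark-3-1",
        framework_version="3.1",      # Spark version added by this commit
        role=role,
        instance_count=2,
        instance_type="ml.m5.xlarge",
    )

    # smspark-submit (the container ENTRYPOINT above) runs the submitted script.
    processor.run(
        submit_app="s3://my-bucket/code/preprocess.py",
        arguments=["--input", "s3://my-bucket/raw", "--output", "s3://my-bucket/processed"],
    )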
Lines changed: 26 additions & 0 deletions
@@ -0,0 +1,26 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+  <property>
+    <name>fs.defaultFS</name>
+    <value>hdfs://nn_uri/</value>
+    <description>NameNode URI</description>
+  </property>
+  <property>
+    <name>fs.s3a.aws.credentials.provider</name>
+    <value>com.amazonaws.auth.DefaultAWSCredentialsProviderChain</value>
+    <description>AWS S3 credential provider</description>
+  </property>
+  <property>
+    <name>fs.s3.impl</name>
+    <value>org.apache.hadoop.fs.s3a.S3AFileSystem</value>
+    <description>s3a filesystem implementation</description>
+  </property>
+  <property>
+    <name>fs.AbstractFileSystem.s3a.impl</name>
+    <value>org.apache.hadoop.fs.s3a.S3A</value>
+    <description>s3a filesystem implementation</description>
+  </property>
+</configuration>
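With fs.s3.impl mapped to S3AFileSystem and credentials resolved through DefaultAWSCredentialsProviderChain, Spark jobs in the container can read and write S3 without embedding keys. A minimal PySpark sketch under those assumptions (the bucket and prefix are placeholders):

    from pyspark.sql import SparkSession

    spark = SparkSession.builder.appName("s3a-read-example").getOrCreate()

    # Credentials come from the default provider chain (env vars, profile, or the
    # job's execution role), as configured in core-site.xml above.
    df = spark.read.parquet("s3a://my-bucket/input/")   # placeholder path
    df.printSchema()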
Lines changed: 67 additions & 0 deletions
@@ -0,0 +1,67 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+  <property>
+    <name>dfs.datanode.data.dir</name>
+    <value>file:///opt/amazon/hadoop/hdfs/datanode</value>
+    <description>Comma separated list of paths on the local filesystem of a DataNode
+    where it should store its blocks.</description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.name.dir</name>
+    <value>file:///opt/amazon/hadoop/hdfs/namenode</value>
+    <description>Path on the local filesystem where the NameNode stores the namespace
+    and transaction logs persistently.</description>
+  </property>
+
+  <!-- Fix for "Failed to replace a bad datanode on the existing pipeline due to no more good datanodes being available to try"
+       From https://community.cloudera.com/t5/Support-Questions/Failed-to-replace-a-bad-datanode-on-the-existing-pipeline/td-p/207711
+       This issue can be caused by continuous network problems or repeated packet drops. It typically happens while data is
+       being written to one of the DataNodes that is pipelining the data to the next DataNode, and a communication
+       failure breaks the pipeline. We only see this issue in small regions. -->
+  <property>
+    <name>dfs.client.block.write.replace-datanode-on-failure.enable</name>
+    <value>true</value>
+    <description>
+      If there is a datanode/network failure in the write pipeline,
+      DFSClient will try to remove the failed datanode from the pipeline
+      and then continue writing with the remaining datanodes. As a result,
+      the number of datanodes in the pipeline is decreased. The feature is
+      to add new datanodes to the pipeline.
+
+      This is a site-wide property to enable/disable the feature.
+
+      When the cluster size is extremely small, e.g. 3 nodes or less, cluster
+      administrators may want to set the policy to NEVER in the default
+      configuration file or disable this feature. Otherwise, users may
+      experience an unusually high rate of pipeline failures since it is
+      impossible to find new datanodes for replacement.
+
+      See also dfs.client.block.write.replace-datanode-on-failure.policy
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.client.block.write.replace-datanode-on-failure.policy</name>
+    <value>ALWAYS</value>
+    <description>
+      This property is used only if the value of
+      dfs.client.block.write.replace-datanode-on-failure.enable is true.
+
+      ALWAYS: always add a new datanode when an existing datanode is
+      removed.
+
+      NEVER: never add a new datanode.
+
+      DEFAULT:
+      Let r be the replication number.
+      Let n be the number of existing datanodes.
+      Add a new datanode only if r is greater than or equal to 3 and either
+      (1) floor(r/2) is greater than or equal to n; or
+      (2) r is greater than n and the block is hflushed/appended.
+    </description>
+  </property>
+</configuration>
Lines changed: 10 additions & 0 deletions
@@ -0,0 +1,10 @@
+spark.driver.extraClassPath /usr/lib/hadoop-lzo/lib/*:/usr/lib/hadoop/hadoop-aws.jar:/usr/share/aws/aws-java-sdk/*:/usr/share/aws/emr/emrfs/conf:/usr/share/aws/emr/emrfs/lib/*:/usr/share/aws/emr/emrfs/auxlib/*:/usr/share/aws/emr/goodies/lib/emr-spark-goodies.jar:/usr/share/aws/emr/security/conf:/usr/share/aws/emr/security/lib/*:/usr/share/aws/hmclient/lib/aws-glue-datacatalog-spark-client.jar:/usr/share/java/Hive-JSON-Serde/hive-openx-serde.jar:/usr/share/aws/sagemaker-spark-sdk/lib/sagemaker-spark-sdk.jar:/usr/share/aws/emr/s3select/lib/emr-s3-select-spark-connector.jar
+spark.driver.extraLibraryPath /usr/lib/hadoop/lib/native:/usr/lib/hadoop-lzo/lib/native
+spark.executor.extraClassPath /usr/lib/hadoop-lzo/lib/*:/usr/lib/hadoop/hadoop-aws.jar:/usr/share/aws/aws-java-sdk/*:/usr/share/aws/emr/emrfs/conf:/usr/share/aws/emr/emrfs/lib/*:/usr/share/aws/emr/emrfs/auxlib/*:/usr/share/aws/emr/goodies/lib/emr-spark-goodies.jar:/usr/share/aws/emr/security/conf:/usr/share/aws/emr/security/lib/*:/usr/share/aws/hmclient/lib/aws-glue-datacatalog-spark-client.jar:/usr/share/java/Hive-JSON-Serde/hive-openx-serde.jar:/usr/share/aws/sagemaker-spark-sdk/lib/sagemaker-spark-sdk.jar:/usr/share/aws/emr/s3select/lib/emr-s3-select-spark-connector.jar
+spark.executor.extraLibraryPath /usr/lib/hadoop/lib/native:/usr/lib/hadoop-lzo/lib/native
+spark.driver.host=sd_host
+spark.hadoop.mapreduce.fileoutputcommitter.algorithm.version=2
+
+# Fix for "Uncaught exception: org.apache.spark.rpc.RpcTimeoutException: Cannot
+# receive any reply from 10.0.109.30:35219 in 120 seconds."
+spark.rpc.askTimeout=300s
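These defaults apply container-wide; individual jobs can still override them when the session is created. A minimal sketch mirroring two of the values above (spark.rpc.askTimeout and the output committer algorithm):

    from pyspark.sql import SparkSession

    # Per-job overrides; spark-defaults.conf supplies these when nothing is set here.
    spark = (
        SparkSession.builder
        .appName("defaults-override-example")
        .config("spark.rpc.askTimeout", "300s")
        .config("spark.hadoop.mapreduce.fileoutputcommitter.algorithm.version", "2")
        .getOrCreate()
    )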
Lines changed: 3 additions & 0 deletions
@@ -0,0 +1,3 @@
+# EMPTY FILE TO AVOID OVERRIDING ENV VARS
+# Specifically, without copying the empty file, SPARK_HISTORY_OPTS will be overridden,
+# spark.history.ui.port defaults to 18082, and spark.eventLog.dir defaults to local fs
Lines changed: 34 additions & 0 deletions
@@ -0,0 +1,34 @@
+<?xml version="1.0"?>
+<!-- Site specific YARN configuration properties -->
+<configuration>
+  <property>
+    <name>yarn.resourcemanager.hostname</name>
+    <value>rm_hostname</value>
+    <description>The hostname of the RM.</description>
+  </property>
+  <property>
+    <name>yarn.nodemanager.hostname</name>
+    <value>nm_hostname</value>
+    <description>The hostname of the NM.</description>
+  </property>
+  <property>
+    <name>yarn.nodemanager.webapp.address</name>
+    <value>nm_webapp_address</value>
+  </property>
+  <property>
+    <name>yarn.nodemanager.vmem-pmem-ratio</name>
+    <value>5</value>
+    <description>Ratio of virtual memory to physical memory.</description>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.am.max-attempts</name>
+    <value>1</value>
+    <description>The maximum number of application attempts.</description>
+  </property>
+  <property>
+    <name>yarn.nodemanager.env-whitelist</name>
+    <value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,YARN_HOME,AWS_CONTAINER_CREDENTIALS_RELATIVE_URI</value>
+    <description>Environment variable whitelist</description>
+  </property>
+
+</configuration>
