Change SELINUX=enforcing to SELINUX=disabled in /etc/selinux/config.
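A minimal sketch of how this could be applied from the shell; the sed/setenforce commands are illustrative and not part of the original steps:

# rewrite the setting in the SELinux config file (takes effect after a reboot)
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
# also stop enforcing for the current session
setenforce 0
# confirm the resulting mode
sestatus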
4.1 Open the configuration file
vim /etc/ssh/sshd_config
4.2 Uncomment the following lines
RSAAuthentication yes                     # enable RSA authentication
PubkeyAuthentication yes                  # enable public/private key authentication
AuthorizedKeysFile .ssh/authorized_keys   # path to the public key file (the file generated above)
4.3 Restart the ssh service
systemctl restart sshd
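A quick, illustrative sanity check of the change (using localhost as the target is an assumption; any cluster node works):

# validate the sshd configuration syntax
sshd -t
# confirm that key-based login succeeds without falling back to a password
ssh -o PasswordAuthentication=no localhost hostname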
1.1 Download jdk-8u131-linux-x64.rpm and install it:
rpm -ivh jdk-8u131-linux-x64.rpm
1.2 Add the Java environment variable to /etc/profile:
export JAVA_HOME=/usr/java/jdk1.8.0_131/
1.3 Save, then reload the configuration
source /etc/profile
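An illustrative verification, not part of the original steps; both checks should report 1.8.0_131:

java -version
echo $JAVA_HOME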
2.1 Download scala-2.12.2.rpm and install it:
rpm -ivh scala-2.12.2.rpm
2.2 Add the environment variable to /etc/profile:
export SCALA_HOME=/usr/share/scala
2.3 Save, then reload the configuration
source /etc/profile
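As with the JDK, a quick illustrative check (not in the original); it should report Scala 2.12.2:

scala -version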
1.1 Clone the template machine
1.2 Configure the network
# edit the configuration
vim /etc/sysconfig/network-scripts/ifcfg-enp0s3
# change the IP below; changing the UUID is also recommended
IPADDR=192.168.1.225
# restart the network
systemctl restart network
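For reference, the relevant fields of ifcfg-enp0s3 might look like the sketch below; everything except IPADDR is an assumption shown only for illustration, so adjust the prefix, gateway, and interface name to your own network:

BOOTPROTO=static      # assumed: static addressing instead of DHCP
ONBOOT=yes            # assumed: bring the interface up at boot
IPADDR=192.168.1.225
PREFIX=24             # assumed netmask
GATEWAY=192.168.1.1   # assumed gateway

# after restarting the network, confirm the address took effect
ip addr show enp0s3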
3.1 Edit /etc/profile
vim /etc/profile
# add the following
export HADOOP_HOME=/opt/hadoop-2.7.3/
export PATH="$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$PATH"
export HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop
export YARN_CONF_DIR=$HADOOP_HOME/etc/hadoop
# reload
source /etc/profile
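An illustrative check that the variables are in effect (not in the original); it should print Hadoop 2.7.3 and the configured path:

hadoop version
echo $HADOOP_CONF_DIR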
3.2 Edit $HADOOP_HOME/etc/hadoop/hadoop-env.sh and set JAVA_HOME as follows:
export JAVA_HOME=/usr/java/jdk1.8.0_131/
3.3 Edit $HADOOP_HOME/etc/hadoop/slaves, remove the existing localhost entry, and replace it with:
slave1
slave2
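The hostnames master, slave1, and slave2 used throughout the configuration must resolve on every node; one hypothetical /etc/hosts layout (all three addresses are placeholders, so use the IPs you assigned above):

192.168.1.225  master
192.168.1.226  slave1
192.168.1.227  slave2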
3.4 Edit $HADOOP_HOME/etc/hadoop/core-site.xml
<configuration>
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://master:9000</value>
    </property>
    <property>
        <name>io.file.buffer.size</name>
        <value>131072</value>
    </property>
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/opt/hadoop-2.7.3/tmp</value>
    </property>
</configuration>
3.5 Edit $HADOOP_HOME/etc/hadoop/hdfs-site.xml
<configuration>
    <property>
        <name>dfs.namenode.secondary.http-address</name>
        <value>master:50090</value>
    </property>
    <property>
        <name>dfs.replication</name>
        <value>2</value>
    </property>
    <property>
        <name>dfs.namenode.name.dir</name>
        <value>file:/opt/hadoop-2.7.3/hdfs/name</value>
    </property>
    <property>
        <name>dfs.datanode.data.dir</name>
        <value>file:/opt/hadoop-2.7.3/hdfs/data</value>
    </property>
</configuration>
3.6 Edit $HADOOP_HOME/etc/hadoop/mapred-site.xml. Copy the template to create the xml file:
cp mapred-site.xml.template mapred-site.xml
<configuration>
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
    <property>
        <name>mapreduce.jobhistory.address</name>
        <value>master:10020</value>
    </property>
    <property>
        <name>mapreduce.jobhistory.webapp.address</name>
        <value>master:19888</value>
    </property>
</configuration>
3.7 Edit $HADOOP_HOME/etc/hadoop/yarn-site.xml
<configuration>
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
    <property>
        <name>yarn.resourcemanager.address</name>
        <value>master:8032</value>
    </property>
    <property>
        <name>yarn.resourcemanager.scheduler.address</name>
        <value>master:8030</value>
    </property>
    <property>
        <name>yarn.resourcemanager.resource-tracker.address</name>
        <value>master:8031</value>
    </property>
    <property>
        <name>yarn.resourcemanager.admin.address</name>
        <value>master:8033</value>
    </property>
    <property>
        <name>yarn.resourcemanager.webapp.address</name>
        <value>master:8088</value>
    </property>
</configuration>
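The original jumps straight to starting the cluster; before the very first start, the NameNode typically has to be formatted on master so that dfs.namenode.name.dir is initialized (an assumed extra step, shown for illustration):

# run once on master only
hdfs namenode -format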
Start
/opt/hadoop-2.7.3/sbin/start-all.sh
Test
# master should show SecondaryNameNode, ResourceManager, and NameNode
# the slaves should show NodeManager and DataNode
jps
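Another illustrative check is the web UIs; the ResourceManager address comes from yarn-site.xml above, while port 50070 for the NameNode UI is the Hadoop 2.x default (an assumption, since the original does not configure it):

# ResourceManager UI (yarn.resourcemanager.webapp.address)
curl -s http://master:8088/cluster > /dev/null && echo "ResourceManager UI reachable"
# NameNode UI on the default 2.x port
curl -s http://master:50070 > /dev/null && echo "NameNode UI reachable"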
3.1 Edit /etc/profile
export SPARK_HOME=/opt/spark-2.1.0-bin-hadoop2.7/
export PATH="$SPARK_HOME/bin:$PATH"
# reload
source /etc/profile
3.2 Edit $SPARK_HOME/conf/spark-env.sh
cp spark-env.sh.template spark-env.sh
# add the following configuration:
export SCALA_HOME=/usr/share/scala
export JAVA_HOME=/usr/java/jdk1.8.0_131/
export SPARK_MASTER_IP=master
export SPARK_WORKER_MEMORY=1g
export HADOOP_CONF_DIR=/opt/hadoop-2.7.3/etc/hadoop
3.3 Edit $SPARK_HOME/conf/slaves
cp slaves.template slaves
master
slave1
slave2
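The original stops at the configuration; a hedged sketch of starting the standalone cluster and checking it, assuming Spark's own start-all.sh in sbin and the default master web UI port 8080:

# run on master; starts the Master here and a Worker on every host listed in conf/slaves
/opt/spark-2.1.0-bin-hadoop2.7/sbin/start-all.sh

# jps on master should now also show Master (and Worker, since master is in the slaves file);
# the slaves should show Worker
jps

# the standalone master web UI defaults to port 8080
curl -s http://master:8080 > /dev/null && echo "Spark master UI reachable"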