Commit 224fdec7 authored by Balazs

- changes to the parameter handling within the Service Orchestrator

- installation of software from a dedicated Cinder volume
- restructuring / refactoring
parent 4cf91e97
@@ -4,12 +4,12 @@ parameters:
master_image:
type: string
description: Which image should the master's OS be set up with?
default: "$master_image$"
default: $master_image$
slave_image:
type: string
description: Which image should the slave's OS be set up with?
default: "$slave_image$"
default: $slave_image$
master_flavor:
type: string
@@ -31,30 +31,25 @@ parameters:
description: slave's name (the index will be appended at the end)
default: $slavenode$
master_ssh_key:
type: string
description: name of public key of openstack user for master node
default: "$ssh_key$"
cluster_subnet_cidr:
type: string
description: CIDR of subnet that cluster is going to use
default: "$subnet_cidr$"
default: $subnet_cidr$
subnet_gateway_ip:
type: string
description: subnet's gateway's IP
default: "$subnet_gw_ip$"
default: $subnet_gw_ip$
allocation_pool_start:
type: string
description: allocation pool's starting IP address
default: "$subnet_allocation_pool_start$"
default: $subnet_allocation_pool_start$
allocation_pool_end:
type: string
description: allocation pool's last IP address
default: "$subnet_allocation_pool_end$"
default: $subnet_allocation_pool_end$
subnet_dns_nameservers:
type: comma_delimited_list
@@ -62,7 +57,7 @@ parameters:
default: $subnet_dns_servers$
resources:
sshpublickey:
$users_ssh_public_key$ sshpublickey:
properties:
name: ssh_cluster_pub_key
public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDkbCR95uSaiAmspMoSPTKGSk3+5Vd6paKmdZQmdbHJd0a9HKLN87mYgc8qOURvxkjCEKMNYws2d2IQO0e7WELSfmMuQn/tUrLAiFTBfGsNxbarRO+UMvDvQwqQgRZ94wrjF/ELz4rQTe87MTuxpR79rbiGxHrHmJFxGAgS2evczwfmCKyyS15brWsNXaxDLyiBN3mWWz/O+4b3IEN/FKHYQcgy+SytGfHQBFkuOI5ETrRgeBy4ohGZzrn44TYjK4L2tLJJPT0rUffl81huVWoqcgwmkHETszDZm7OYkeLvX9g02JzeiKrqAFEvaWxjyreDkLEqKNF9oxi3SfdX72Yr mesz@CLT-MOB-T-6253-2
@@ -83,7 +78,7 @@ resources:
hadoop_network:
type: OS::Neutron::Net
properties:
name: "hadoopNet"
name: hadoopNet
hadoop_subnet:
type: OS::Neutron::Subnet
@@ -110,7 +105,7 @@ resources:
name: { get_param: master_name }
image: { get_param: master_image }
flavor: { get_param: master_flavor }
key_name: { get_resource: sshpublickey }
key_name: $master_ssh_key_entry$
networks:
- port: { get_resource: hadoop_port }
user_data:
@@ -122,16 +117,12 @@ $paramsslave$
$slaves$
hadoop_ip:
type: OS::Neutron::FloatingIP
properties:
floating_network: "external-net"
$floating_ip_resource$
hadoop_router:
type: OS::Neutron::Router
properties:
external_gateway_info:
network: "external-net"
network: external-net
router_interface:
type: OS::Neutron::RouterInterface
@@ -139,13 +130,20 @@ $slaves$
router_id: { get_resource: hadoop_router }
subnet_id: { get_resource: hadoop_subnet }
floating_ip_assoc:
type: OS::Neutron::FloatingIPAssociation
# software_volume:
# type: OS::Cinder::Volume
# properties:
# name: software_volume
# size: 6
# image: $image_id$
volume_attachment:
type: OS::Cinder::VolumeAttachment
properties:
floatingip_id: { get_resource: hadoop_ip }
port_id: { get_resource: hadoop_port }
volume_id: $image_id$
instance_uuid: { get_resource: hadoop_master }
mountpoint: /dev/vdb
$floating_ip_assoc$
outputs:
external_ip:
description: The IP address of the deployed master node
value: { get_attr: [ hadoop_ip, floating_ip_address ] }
$external_ip_output$
\ No newline at end of file
# configuration file for service orchestrator (currently just hadoop as a
# service)
# Everything has to be within the section "cluster" because of the SO's
# implementation. All data can also be given at instantiation; if so, that
# data will supersede the data given in this file. On the other hand, if the
# required data is neither passed during instantiation nor in this file, a
# default value will be set by the SO which might not be according to the
# user's needs.
# NOTE: there is an entry (icclab.haas.rootfolder) which cannot be included in
# this file as it is also needed in order to find this very file. It has to be
# delivered to the SO as an additional attribute with every deployment call!
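# Example: passing the attribute "icclab.haas.slave.number" with the
# (illustrative) value "5" at instantiation supersedes the
# icclab.haas.slave.number entry below.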
[cluster]
# name of the whole cluster - at the moment, this value isn't used for anything
icclab.haas.cluster.name: distributedprocessing
# image files to use as the base image for master and slaves; the image should
# be a Debian-based distribution with the user ec2-user and passwordless sudo
icclab.haas.master.image: Ubuntu-Trusty-1404-7-10-2015
icclab.haas.slave.image: Ubuntu-Trusty-1404-7-10-2015
# Name of the SSH public key registration in keystone; if a key name is given
# without a public key, that key name will be used for the master - in this
# case, the key has to be registered in keystone already. If a key name is
# given together with a public key, a key with the given public key will be
# registered in keystone (and inserted into the master). If neither a key name
# nor a public key is given, the public key from the file master.id_rsa.pub
# will be inserted into the master.
icclab.haas.master.sshkeyname:
icclab.haas.master.publickey:
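# example (hypothetical values) for registering a new key in keystone and
# inserting it into the master:
# icclab.haas.master.sshkeyname: cluster-key
# icclab.haas.master.publickey: ssh-rsa AAAAB3Nza... user@workstation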
# flavor for master / slave
icclab.haas.master.flavor: m1.small
icclab.haas.slave.flavor: m1.small
# number of masters and slaves to be configured (certain frameworks may not
# allow multiple masters), plus the decision whether a slave node should be
# started on the master node as well
icclab.haas.master.number: 1
icclab.haas.slave.number: 2
icclab.haas.master.slaveonmaster: True
# should a floating IP be created for the cluster?
icclab.haas.master.withfloatingip: True
icclab.haas.master.attachfloatingipwithid: 034f34b7-5ff5-4109-9e8b-a9d919e7ff39
# name for the master(s) and slave(s)
icclab.haas.master.name: masternode
icclab.haas.slave.name: slavenode
# network configuration for the subnet
icclab.haas.network.subnet.cidr: 192.168.19.0/24
icclab.haas.network.gw.ip: 192.168.19.1
icclab.haas.network.subnet.allocpool.start: 192.168.19.2
icclab.haas.network.subnet.allocpool.end: 192.168.19.254
icclab.haas.network.dnsservers: ["64.6.64.6","64.6.65.6"]
# id of the nova image containing the directory structure for the software to
# be installed (can be retrieved with `nova image-list` or from Horizon)
icclab.haas.master.imageid: 8d92eb3d-2347-4177-939d-4f0f8537d31a
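# example lookup (the image name "hadoop_software" is only illustrative):
#   nova image-list | grep hadoop_software
# the ID from the first column goes into icclab.haas.master.imageid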
# method for transferring the files to the slaves of the cluster: either
# transfer them in packed form and unpack them on the slaves, or transfer them
# in unpacked form; this option is the name of a defined bash function.
# Currently, the two options transferUnpackedFiles and transferFirstUnpackLater
# are possible.
icclab.haas.master.transfermethod: transferUnpackedFiles
# some information about the Linux user has to be provided - this user has to
# be present on all of the deployed machines and it needs to have passwordless
# sudo access
icclab.haas.cluster.username: ec2-user
icclab.haas.cluster.usergroup: ec2-user
icclab.haas.cluster.homedir: /home/ec2-user
# debug settings - you can decide whether the created template should actually
# be deployed on OpenStack; for debugging purposes, you can also provide a path
# for saving the template locally on the machine where the SO is run
icclab.haas.debug.donotdeploy: False
icclab.haas.debug.savetemplatetolocalpath:
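# example (illustrative path):
# icclab.haas.debug.savetemplatetolocalpath: /tmp/hadoop_cluster.yaml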
#!/bin/bash
# NOTE about this bash file: this file will be used to set up the distributed
# computing cluster on OpenStack. This includes copying the actual application
# frameworks from an external Cinder volume to each master/slave and writing
# the configuration files to each of them. The parameters within dollar signs
# (e.g. $homedir$) will be filled in by the service orchestrator (so.py) with
# values given by the user, with default settings from the file
# defaultSettings.cfg, with assumptions made within the service orchestrator,
# or with pre-defined configuration files within the /data directory of the SO
# bundle.
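# For illustration only: after substitution by the SO, a line such as
#   echo "disabling IPv6" >> $homedir$/deployment.log
# becomes (assuming the default home directory from defaultSettings.cfg)
#   echo "disabling IPv6" >> /home/ec2-user/deployment.log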
{
SECONDS=0
# disable IPv6 as Hadoop won't run on a system with it activated
echo "disabling IPv6" >> /home/ec2-user/deployment.log
echo "disabling IPv6" >> $homedir$/deployment.log
echo -e "\nnet.ipv6.conf.all.disable_ipv6 = 1\nnet.ipv6.conf.default.disable_ipv6 = 1\nnet.ipv6.conf.lo.disable_ipv6 = 1" >> /etc/sysctl.conf
sysctl -p
# add group and user for hadoop and setup SSH for it
#echo "adding user ec2-user:hadoop" >> /home/ec2-user/deployment.log
#addgroup hadoop
#useradd --gid hadoop --home /home/ec2-user --create-home --shell /bin/bash ec2-user
#echo "ec2-user ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
#passwd --delete ec2-user
#su ec2-user -c "mkdir /home/ec2-user/.ssh"
su ec2-user -c 'echo -e "$master.id_rsa$" > /home/ec2-user/.ssh/id_rsa'
su ec2-user -c 'echo -e "$master.id_rsa.pub$" > /home/ec2-user/.ssh/id_rsa.pub'
#su ec2-user -c "cat /home/ec2-user/.ssh/id_rsa.pub >> /home/ec2-user/.ssh/authorized_keys"
su ec2-user -c 'echo -e "Host *\n StrictHostKeyChecking no\n UserKnownHostsFile=/dev/null" > /home/ec2-user/.ssh/config'
chmod 0600 /home/ec2-user/.ssh/*
# download Hadoop & Java on the master and install them (including setting the environment variables)
# setup master's SSH configuration
su $username$ -c 'echo -e "$master.id_rsa$" > $homedir$/.ssh/id_rsa'
su $username$ -c 'echo -e "$master.id_rsa.pub$" > $homedir$/.ssh/id_rsa.pub'
$insert_master_pub_key$
su $username$ -c 'echo -e "Host *\n StrictHostKeyChecking no\n UserKnownHostsFile=/dev/null" > $homedir$/.ssh/config'
chmod 0600 $homedir$/.ssh/*
# copy Hadoop & Java to the master and install them (including setting the
# environment variables)
cd /root
echo "downloading hadoop..." >> /home/ec2-user/deployment.log
wget $hadoop_uri$
echo "downloading jdk..." >> /home/ec2-user/deployment.log
wget --no-cookies --no-check-certificate --header "Cookie: gpw_e24=http%3A%2F%2Fwww.oracle.com%2F; oraclelicense=accept-securebackup-cookie" "$jdk_uri$"
echo "setting up files for deployment on slaves..." >> /home/ec2-user/deployment.log
# move packages to ec2-user's home directory (access rights)
mkdir /home/ec2-user/archives
mv /root/{hadoop-2.7.1,jdk-8u60-linux-x64}.tar.gz /home/ec2-user/archives
# disk has to be mounted by ID as the virtual disk device in the /dev folder
# can be different with every restart
mount -o ro /dev/disk/by-id/$disk_id$ /mnt/
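# (for illustration only: on KVM with virtio, $disk_id$ typically has the form
#  virtio-<first 20 characters of the Cinder volume UUID>)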
mkdir $homedir$/archives
echo "setting up files for deployment on slaves..." >> $homedir$/deployment.log
cat - >> /root/bashrc.suffix <<'EOF'
export JAVA_HOME=/usr/lib/java/jdk
export PATH=$PATH:$JAVA_HOME/bin
@@ -38,97 +43,128 @@ EOF
# configure Hadoop
# first of all, let's create the config files for the slaves
mkdir /home/ec2-user/hadoopconf
mv /root/bashrc.suffix /home/ec2-user/hadoopconf
mkdir $homedir$/hadoopconf
mv /root/bashrc.suffix $homedir$/hadoopconf
# creating /etc/hosts file's replacement - don't forget: slaves need to have the same name as configured with Heat Template!!!
# creating /etc/hosts file's replacement - don't forget: slaves need to have
# the same name as configured with Heat Template!!!
echo -e "127.0.0.1\tlocalhost\n`/sbin/ifconfig eth0 | grep 'inet addr' | cut -d: -f2 | awk '{print $1}'` $masternode$" > /root/hosts.replacement
cat - >> /root/hosts.replacement <<'EOF'
$hostsfilecontent$
EOF
mv -f /root/hosts.replacement /home/ec2-user/hadoopconf
mv -f /root/hosts.replacement $homedir$/hadoopconf
# create yarn-site.xml:
cat - > /home/ec2-user/hadoopconf/yarn-site.xml << 'EOF'
cat - > $homedir$/hadoopconf/yarn-site.xml << 'EOF'
$yarn-site.xml$
EOF
# create core-site.xml:
cat - > /home/ec2-user/hadoopconf/core-site.xml << 'EOF'
cat - > $homedir$/hadoopconf/core-site.xml << 'EOF'
$core-site.xml$
EOF
# create mapred-site.xml:
cat - >> /home/ec2-user/hadoopconf/mapred-site.xml << 'EOF'
cat - >> $homedir$/hadoopconf/mapred-site.xml << 'EOF'
$mapred-site.xml$
EOF
# create hdfs-site.xml: (here, replication factor has to be entered!!!)
cat - >> /home/ec2-user/hadoopconf/hdfs-site.xml << 'EOF'
cat - >> $homedir$/hadoopconf/hdfs-site.xml << 'EOF'
$hdfs-site.xml$
EOF
# create hadoop-env.sh:
cat - >> /home/ec2-user/hadoopconf/hadoop-env.sh << 'EOF'
cat - >> $homedir$/hadoopconf/hadoop-env.sh << 'EOF'
$hadoop-env.sh$
EOF
# setup parallel ssh
apt-get install -y pssh
cat - > /home/ec2-user/hosts.lst << 'EOF'
# copy pssh/pscp to /usr/bin/pssh on master
# originally from Git repo https://github.com/jcmcken/parallel-ssh
cp -r /mnt/pssh/pssh /usr/bin/
cat - > $homedir$/hosts.lst << 'EOF'
127.0.0.1
$for_loop_slaves$
EOF
echo "copying hadoop and jdk to slaves" >> /home/ec2-user/deployment.log
su ec2-user -c "parallel-scp -h /home/ec2-user/hosts.lst /home/ec2-user/archives/{hadoop-2.7.1.tar.gz,jdk-8u60-linux-x64.tar.gz} /home/ec2-user"
echo "unpacking hadoop" >> /home/ec2-user/deployment.log
su ec2-user -c "parallel-ssh -t 200 -h /home/ec2-user/hosts.lst \"tar -xzf /home/ec2-user/hadoop-2.7.1.tar.gz\""
echo "unpacking jdk" >> /home/ec2-user/deployment.log
su ec2-user -c "parallel-ssh -t 200 -h /home/ec2-user/hosts.lst \"tar -xzf /home/ec2-user/jdk-8u60-linux-x64.tar.gz\""
echo "setting up both" >> /home/ec2-user/deployment.log
su ec2-user -c "parallel-ssh -h /home/ec2-user/hosts.lst \"sudo mkdir /usr/lib/hadoop\""
su ec2-user -c "parallel-ssh -h /home/ec2-user/hosts.lst \"sudo mv /home/ec2-user/hadoop-2.7.1 /usr/lib/hadoop\""
su ec2-user -c "parallel-ssh -h /home/ec2-user/hosts.lst \"sudo ln -s /usr/lib/hadoop/hadoop-2.7.1 /usr/lib/hadoop/hadoop\""
su ec2-user -c "parallel-ssh -h /home/ec2-user/hosts.lst \"sudo mv /usr/lib/hadoop/hadoop-2.7.1/etc/hadoop/ /etc/\""
su ec2-user -c "parallel-ssh -h /home/ec2-user/hosts.lst \"sudo mkdir -p /usr/lib/java\""
su ec2-user -c "parallel-ssh -h /home/ec2-user/hosts.lst \"sudo mv /home/ec2-user/jdk1.8.0_60/ /usr/lib/java/\""
su ec2-user -c "parallel-ssh -h /home/ec2-user/hosts.lst \"sudo ln -s /usr/lib/java/jdk1.8.0_60/ /usr/lib/java/jdk\""
su ec2-user -c "parallel-scp -h /home/ec2-user/hosts.lst /home/ec2-user/hadoopconf/bashrc.suffix /home/ec2-user"
su ec2-user -c "parallel-ssh -h /home/ec2-user/hosts.lst \"sudo sh -c \\\"cat /home/ec2-user/bashrc.suffix >> /etc/bash.bashrc\\\"\""
function transferFirstUnpackLater {
# copying hadoop & jdk to slaves in a compact form and unpacking them on
# the slaves
echo "copying hadoop and jdk to slaves" >> $homedir$/deployment.log
su $username$ -c "/usr/bin/pssh/bin/pscp -h $homedir$/hosts.lst /mnt/{hadoop/hadoop-2.7.1.tar.gz,java/jdk-8u60-linux-x64.tar.gz} $homedir$"
echo "unpacking hadoop" >> $homedir$/deployment.log
su $username$ -c "/usr/bin/pssh/bin/pssh -t 2000 -h $homedir$/hosts.lst \"tar -xzf $homedir$/hadoop-2.7.1.tar.gz\""
echo "unpacking jdk" >> $homedir$/deployment.log
su $username$ -c "/usr/bin/pssh/bin/pssh -t 2000 -h $homedir$/hosts.lst \"tar -xzf $homedir$/jdk-8u60-linux-x64.tar.gz\""
echo "setting up both" >> $homedir$/deployment.log
# done with copying/unpacking hadoop/jdk
}
function transferUnpackedFiles {
# in this scenario, hadoop/jdk will be transferred to the slaves in an
# unpacked form
echo "copying hadoop and jdk to slaves" >> $homedir$/deployment.log
su $username$ -c "/usr/bin/pssh/bin/pscp -r -h $homedir$/hosts.lst /mnt/{hadoop/hadoop-2.7.1,java/jdk1.8.0_60} $homedir$"
# done with transfer
}
# here, the script has to decide which function to call:
# transferFirstUnpackLater or transferUnpackedFiles
echo "transferring hadoop & jdk to the masters/slaves and unpacking them" >> $homedir$/deployment.log
$transfer_method$
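# (with the default configuration, this placeholder resolves to a plain call of
#  the function transferUnpackedFiles)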
# the mounted volume isn't needed anymore - for security's sake, it will be
# unmounted
umount /mnt
echo "setting up hadoop & jdk" >> $homedir$/deployment.log
su $username$ -c "/usr/bin/pssh/bin/pssh -h $homedir$/hosts.lst \"sudo mkdir /usr/lib/hadoop\""
su $username$ -c "/usr/bin/pssh/bin/pssh -h $homedir$/hosts.lst \"sudo mv $homedir$/hadoop-2.7.1 /usr/lib/hadoop\""
su $username$ -c "/usr/bin/pssh/bin/pssh -h $homedir$/hosts.lst \"sudo ln -s /usr/lib/hadoop/hadoop-2.7.1 /usr/lib/hadoop/hadoop\""
su $username$ -c "/usr/bin/pssh/bin/pssh -h $homedir$/hosts.lst \"sudo mv /usr/lib/hadoop/hadoop-2.7.1/etc/hadoop/ /etc/\""
su $username$ -c "/usr/bin/pssh/bin/pssh -h $homedir$/hosts.lst \"sudo mkdir -p /usr/lib/java\""
su $username$ -c "/usr/bin/pssh/bin/pssh -h $homedir$/hosts.lst \"sudo mv $homedir$/jdk1.8.0_60/ /usr/lib/java/\""
su $username$ -c "/usr/bin/pssh/bin/pssh -h $homedir$/hosts.lst \"sudo ln -s /usr/lib/java/jdk1.8.0_60/ /usr/lib/java/jdk\""
su $username$ -c "/usr/bin/pssh/bin/pscp -h $homedir$/hosts.lst $homedir$/hadoopconf/bashrc.suffix $homedir$"
su $username$ -c "/usr/bin/pssh/bin/pssh -h $homedir$/hosts.lst \"sudo sh -c \\\"cat $homedir$/bashrc.suffix >> /etc/bash.bashrc\\\"\""
# now, let's copy the files to the slaves
su ec2-user -c "parallel-ssh -h /home/ec2-user/hosts.lst \"sudo mkdir -p /app/hadoop/tmp\""
su ec2-user -c "parallel-ssh -h /home/ec2-user/hosts.lst \"sudo chown ec2-user:ec2-user /app/hadoop/tmp\""
su ec2-user -c "parallel-ssh -h /home/ec2-user/hosts.lst \"sudo chmod 750 /app/hadoop/tmp\""
su ec2-user -c "parallel-ssh -h /home/ec2-user/hosts.lst \"sudo chown -R ec2-user:ec2-user /etc/hadoop\""
# the file has to be copied into the user directory as ec2-user doesn't have permissions to write into /etc/hadoop
echo "copying config files from master to slave..." >> /home/ec2-user/deployment.log
su ec2-user -c "parallel-scp -h /home/ec2-user/hosts.lst /home/ec2-user/hadoopconf/core-site.xml /home/ec2-user"
su $username$ -c "/usr/bin/pssh/bin/pssh -h $homedir$/hosts.lst \"sudo mkdir -p /app/hadoop/tmp\""
su $username$ -c "/usr/bin/pssh/bin/pssh -h $homedir$/hosts.lst \"sudo chown $username$:$usergroup$ /app/hadoop/tmp\""
su $username$ -c "/usr/bin/pssh/bin/pssh -h $homedir$/hosts.lst \"sudo chmod 750 /app/hadoop/tmp\""
su $username$ -c "/usr/bin/pssh/bin/pssh -h $homedir$/hosts.lst \"sudo chown -R $username$:$usergroup$ /etc/hadoop\""
# the file has to be copied into the user directory as $username$ doesn't have
# permissions to write into /etc/hadoop
echo "copying config files from master to slave..." >> $homedir$/deployment.log
su $username$ -c "/usr/bin/pssh/bin/pscp -h $homedir$/hosts.lst $homedir$/hadoopconf/core-site.xml $homedir$"
# move file to its final location (/etc/hadoop)
su ec2-user -c "parallel-ssh -h /home/ec2-user/hosts.lst \"sudo mv -f /home/ec2-user/core-site.xml /etc/hadoop\""
su $username$ -c "/usr/bin/pssh/bin/pssh -h $homedir$/hosts.lst \"sudo mv -f $homedir$/core-site.xml /etc/hadoop\""
su ec2-user -c "parallel-scp -h /home/ec2-user/hosts.lst /home/ec2-user/hadoopconf/{{mapred,hdfs,yarn}-site.xml,hadoop-env.sh} /etc/hadoop"
su $username$ -c "/usr/bin/pssh/bin/pscp -h $homedir$/hosts.lst $homedir$/hadoopconf/{{mapred,hdfs,yarn}-site.xml,hadoop-env.sh} /etc/hadoop"
su ec2-user -c "parallel-scp -h /home/ec2-user/hosts.lst /home/ec2-user/hadoopconf/hosts.replacement /home/ec2-user"
su ec2-user -c "parallel-ssh -h /home/ec2-user/hosts.lst \"sudo mv -f /home/ec2-user/hosts.replacement /etc/hosts\""
su $username$ -c "/usr/bin/pssh/bin/pscp -h $homedir$/hosts.lst $homedir$/hadoopconf/hosts.replacement $homedir$"
su $username$ -c "/usr/bin/pssh/bin/pssh -h $homedir$/hosts.lst \"sudo mv -f $homedir$/hosts.replacement /etc/hosts\""
su ec2-user -c "parallel-ssh -h /home/ec2-user/hosts.lst \"ln -s /etc/hadoop /usr/lib/hadoop/hadoop-2.7.1/etc/hadoop\""
su $username$ -c "/usr/bin/pssh/bin/pssh -h $homedir$/hosts.lst \"ln -s /etc/hadoop /usr/lib/hadoop/hadoop-2.7.1/etc/hadoop\""
# set master and slave nodes
echo $masternode$ > /etc/hadoop/masters
cat - > /etc/hadoop/slaves << 'EOF'
$masternode$
$slavesfile$
$masternodeasslave$$slavesfile$
EOF
source /etc/hadoop/hadoop-env.sh
su ec2-user -c "/usr/lib/hadoop/hadoop/bin/hdfs namenode -format"
su ec2-user -c "/usr/lib/hadoop/hadoop/sbin/start-dfs.sh"
su ec2-user -c "/usr/lib/hadoop/hadoop/sbin/start-yarn.sh"
echo "hadoop cluster ready" >> /home/ec2-user/deployment.log
su $username$ -c "/usr/lib/hadoop/hadoop/bin/hdfs namenode -format"
su $username$ -c "/usr/lib/hadoop/hadoop/sbin/start-dfs.sh"
su $username$ -c "/usr/lib/hadoop/hadoop/sbin/start-yarn.sh"
echo "hadoop cluster ready" >> $homedir$/deployment.log
duration=$SECONDS
echo "deployment took me $duration seconds"
# in the following line, the whole regular output will be redirected to the
# file debug.log in the user's home directory and the error output to the file
# error.log within the same directory
} 2> $homedir$/error.log | tee $homedir$/debug.log
@@ -10,7 +10,25 @@
"icclab.haas.slave.image": "",
"icclab.haas.master.sshkeyname": "",
"icclab.haas.master.publickey": "",
"icclab.haas.cluster.procframework": ""
"icclab.haas.cluster.procframework": "",
"icclab.haas.master.flavor": "",
"icclab.haas.slave.flavor": "",
"icclab.haas.master.withfloatingip": "",
"icclab.haas.master.attachfloatingipwithid": "",
"icclab.haas.master.name": "",
"icclab.haas.slave.name": "",
"icclab.haas.network.subnet.cidr": "",
"icclab.haas.network.gw.ip": "",
"icclab.haas.network.subnet.allocpool.start": "",
"icclab.haas.network.subnet.allocpool.end": "",
"icclab.haas.network.dnsservers": "",
"icclab.haas.master.imageid": "",
"icclab.haas.master.transfermethod": "",
"icclab.haas.cluster.username": "",
"icclab.haas.cluster.usergroup": "",
"icclab.haas.cluster.homedir": "",
"icclab.haas.debug.donotdeploy": "",
"icclab.haas.debug.savetemplatetolocalpath": ""
},
"service_endpoint": "http://haas.cloudcomplab.ch:8888/haas/",
"depends_on": []
@@ -17,21 +17,6 @@
key_name: { get_resource: sshpublickey }
networks:
- port: { get_resource: hadoop_slave_port_$slavenumber$ }
# user_data:
# str_replace:
# template: |
# #!/bin/bash
# addgroup hadoop
# useradd --gid hadoop --home /home/hduser --create-home --shell /bin/bash hduser
# # as the only user to enter this VM is hduser, he needs to be able to access root functionality without a password
# echo "hduser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
# passwd --delete hduser
# su hduser -c "mkdir /home/hduser/.ssh"
# su hduser -c 'echo -e "$master.id_rsa.pub$" > /home/hduser/.ssh/authorized_keys'
# chmod 0600 /home/hduser/.ssh/config
# echo $info$
# params:
# $info$: "no info today"
############### end slave $slavenumber$ ##############
# Configuration
The system has to be configured before use. Within the data directory there is a file defaultSettings.cfg which contains the default settings for the distributed computing cluster; these defaults are used whenever the user does not provide the corresponding values.
## Software Volume
To make deployment faster, all software that is to be installed has to be stored on a Cinder volume so that it does not have to be downloaded from a public server. To achieve this, a Cinder volume has to be created with all the necessary software saved on it, in either compressed or uncompressed form (depending on how you want to deploy the cluster). The directory structure which has to be followed on this volume is the following:
![software volume](softwarevolume.png)
The subdirectories always contain the unpacked files. The packed files can be downloaded from the following addresses:
http://mirror.switch.ch/mirror/apache/dist/hadoop/common/hadoop-2.7.1/hadoop-2.7.1.tar.gz
http://download.oracle.com/otn-pub/java/jdk/8u60-b27/jdk-8u60-linux-x64.tar.gz (cannot be downloaded directly)
https://github.com/jcmcken/parallel-ssh (master zip file; the renamed pssh.zip isn't actually used directly but is unpacked into the pssh subdirectory - have a look at the master_bash.sh file for the exact structure expected within that subdirectory; the actual pssh/pscp files are under pssh/pssh/bin)
As soon as the cluster has been set up, the volume is unmounted and can be reused for other clusters. (In a later version, a new volume will be created for each individual cluster.)
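One possible way to populate such a volume is sketched below (the device path `/dev/vdc` and the locations of the downloaded archives are assumptions; the target paths under `/mnt` are the ones master_bash.sh expects):
```
# format and mount the fresh volume on a helper VM (device path is illustrative)
mkfs.ext4 /dev/vdc && mount /dev/vdc /mnt
mkdir -p /mnt/hadoop /mnt/java /mnt/pssh
# Hadoop: packed archive plus the unpacked distribution next to it
cp hadoop-2.7.1.tar.gz /mnt/hadoop/
tar -xzf hadoop-2.7.1.tar.gz -C /mnt/hadoop/
# JDK: same structure (unpacks to jdk1.8.0_60)
cp jdk-8u60-linux-x64.tar.gz /mnt/java/
tar -xzf jdk-8u60-linux-x64.tar.gz -C /mnt/java/
# parallel-ssh checkout; pssh/pscp end up under /mnt/pssh/pssh/bin
git clone https://github.com/jcmcken/parallel-ssh /mnt/pssh/pssh
umount /mnt
```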
# Run Service Manager and instantiate Service Orchestrator Instances
Clone the necessary Git repositories
@@ -75,4 +94,5 @@ The easiest is to run the service orchestrator locally by starting the application
If a new error occurs after deploying the service orchestrator on the Cloud Controller, the log messages can be seen by connecting with the OpenShift CLI client and entering `oc logs <pod_name>`.
If this isn't helping either, a terminal within the pod can be started by `oc rsh <pod_name> /bin/sh` as there is no bash shell in this pod.
\ No newline at end of file
If this isn't helping either, a terminal within the pod can be started by `oc rsh <pod_name> /bin/sh` as there is no bash shell in this pod.
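For example (keeping the placeholder `<pod_name>` used above):
```
# find the pod running the service orchestrator
oc get pods
# inspect its log output
oc logs <pod_name>
# fall back to a plain sh session inside the pod (there is no bash)
oc rsh <pod_name> /bin/sh
```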