Commit 2f6a4748 authored by Balazs

initial release

# Testing SO without deploying it using CC
Go to the directory of mcn_cc_sdk and set up a virtualenv (note: this could be streamlined):

    $ virtualenv /tmp/mcn_test_virt
    $ source /tmp/mcn_test_virt/bin/activate
Install the SDK and the required packages:

    $ pip install pbr six iso8601 babel requests python-heatclient==0.2.9 python-keystoneclient
    $ python setup.py install  # in the mcn_cc_sdk directory
Run the SO:

    $ export OPENSHIFT_PYTHON_DIR=/tmp/mcn_test_virt
    $ export OPENSHIFT_REPO_DIR=<path to sample so>
    $ python ./wsgi/application
Optionally, you can also set DESIGN_URI if your OpenStack installation is not local.
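For example (a sketch; the assumption here is that DESIGN_URI points at the Keystone endpoint of the target cloud):

```bash
# hypothetical endpoint; replace <openstack-host> with your controller's address
$ export DESIGN_URI=http://<openstack-host>:5000/v2.0
```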
In a new terminal, get a token from Keystone (the token must belong to a user with the admin role for the tenant) and export it together with the tenant name:

    $ keystone token-get
    $ export KID='...'
    $ export TENANT='...'
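The token id can also be captured non-interactively (a sketch; the awk field position depends on the keystone CLI's table layout, and the tenant name is an assumption):

```bash
$ export KID=$(keystone token-get | awk '/ id / {print $4}')
$ export TENANT=admin   # assumption: the tenant with the admin role is named "admin"
```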
You can now visit the SO interface [here](http://localhost:8051/orchestrator/default).
## Sample requests
Initialize the SO:

    $ curl -v -X PUT http://localhost:8051/orchestrator/default \
          -H 'Content-Type: text/occi' \
          -H 'Category: orchestrator; scheme="http://schemas.mobile-cloud-networking.eu/occi/service#"' \
          -H 'X-Auth-Token: '$KID \
          -H 'X-Tenant-Name: '$TENANT
Get the state of the SO and the service instance:

    $ curl -v -X GET http://localhost:8051/orchestrator/default \
          -H 'X-Auth-Token: '$KID \
          -H 'X-Tenant-Name: '$TENANT
Trigger deployment of the service instance:

    $ curl -v -X POST 'http://localhost:8051/orchestrator/default?action=deploy' \
          -H 'Content-Type: text/occi' \
          -H 'Category: deploy; scheme="http://schemas.mobile-cloud-networking.eu/occi/service#"' \
          -H 'X-Auth-Token: '$KID \
          -H 'X-Tenant-Name: '$TENANT
Trigger provisioning of the service instance:

    $ curl -v -X POST 'http://localhost:8051/orchestrator/default?action=provision' \
          -H 'Content-Type: text/occi' \
          -H 'Category: provision; scheme="http://schemas.mobile-cloud-networking.eu/occi/service#"' \
          -H 'X-Auth-Token: '$KID \
          -H 'X-Tenant-Name: '$TENANT
Trigger an update of the SO and the service instance:

    $ curl -v -X POST http://localhost:8051/orchestrator/default \
          -H 'Content-Type: text/occi' \
          -H 'X-Auth-Token: '$KID \
          -H 'X-Tenant-Name: '$TENANT \
          -H 'X-OCCI-Attribute: occi.epc.attr_1="foo"'
Trigger deletion of the SO and the service instance:

    $ curl -v -X DELETE http://localhost:8051/orchestrator/default \
          -H 'X-Auth-Token: '$KID \
          -H 'X-Tenant-Name: '$TENANT
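Since deploy and provision run asynchronously, the state request above can simply be polled until the stack settles (a small helper, not part of the repo):

```bash
# repeat the GET every 10 seconds; stop with Ctrl-C once the state is stable
while true; do
  curl -s http://localhost:8051/orchestrator/default \
       -H 'X-Auth-Token: '$KID -H 'X-Tenant-Name: '$TENANT
  echo; sleep 10
done
```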
FROM gliderlabs/alpine:3.1
WORKDIR /app
COPY . /app
RUN apk --update add --virtual build-dependencies \
        python-dev \
        py-pip \
        build-base \
        curl \
    && pip install virtualenv \
    && virtualenv /env \
    && curl -s -k -o /tmp/sdk.tar.gz 'https://owncloud.mobile-cloud-networking.eu/owncloud/public.php?service=files&t=01ad0519e7e4ad9bc8fdbf5f959f183e&download' \
    && /env/bin/pip install /tmp/sdk.tar.gz \
    && curl -s -k -o /tmp/sm.tar.gz 'https://owncloud.mobile-cloud-networking.eu/owncloud/public.php?service=files&t=3ec7178ae3587866a0d94e27af95024b&download' \
    && /env/bin/pip install /tmp/sm.tar.gz \
    && /env/bin/pip install -r /app/requirements.txt \
    && /env/bin/python setup.py install \
    && rm -rf /var/cache/apk/* /root/.cache/*
EXPOSE 8080
CMD ["/env/bin/python", "./wsgi/application"]
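The image can be built and exercised locally (a sketch; the tag `haas-so` is arbitrary, and the container's exposed port 8080 is mapped straight through):

```bash
# build from the repository root, then run the SO container
docker build -t haas-so .
docker run -p 8080:8080 haas-so
```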
# TODO: update the external network name (currently "public") and the master_flavor/slave_flavor defaults
heat_template_version: 2014-10-16

parameters:
  os_image:
    type: string
    description: Which image should the master's and slaves' OS be set up with?
    default: "$os_image$"
  master_flavor:
    type: string
    description: Flavor of the master instance
    default: "c1.medium"
  slave_flavor:
    type: string
    description: Flavor of the slave instances
    default: "c1.medium"
  master_name:
    type: string
    description: master's name
    default: $masternode$
  slave_name:
    type: string
    description: slave's name (the index will be appended at the end)
    default: $slavenode$
  master_ssh_key:
    type: string
    description: name of the OpenStack user's public key for the master node
    default: "$ssh_key$"
  cluster_subnet_cidr:
    type: string
    description: CIDR of the subnet the cluster is going to use
    default: "$subnet_cidr$"
  subnet_gateway_ip:
    type: string
    description: the subnet's gateway IP
    default: "$subnet_gw_ip$"
  allocation_pool_start:
    type: string
    description: first IP address of the allocation pool
    default: "$subnet_allocation_pool_start$"
  allocation_pool_end:
    type: string
    description: last IP address of the allocation pool
    default: "$subnet_allocation_pool_end$"
  subnet_dns_nameservers:
    type: comma_delimited_list
    description: nameservers for the used subnet
    default: $subnet_dns_servers$

resources:
  hadoop_sec_group:
    type: OS::Neutron::SecurityGroup
    properties:
      name: hadoop_security_group
      rules: [
        {"direction":"ingress","protocol":"tcp","port_range_min":"22","port_range_max":"22"},
        {"direction":"ingress","protocol":"tcp","port_range_min":"8025","port_range_max":"8088"},
        {"direction":"ingress","protocol":"tcp","port_range_min":"34342","port_range_max":"34342"},
        {"direction":"ingress","protocol":"tcp","port_range_min":"50010","port_range_max":"50105"},
        {"direction":"ingress","protocol":"tcp","port_range_min":"54310","port_range_max":"54310"}
      ]

  hadoop_network:
    type: OS::Neutron::Net
    properties:
      name: "hadoopNet"

  # TODO: quotation marks were added here because Heat had problems deploying the template
  "hadoop_subnet":
    "type": "OS::Neutron::Subnet"
    "properties":
      "network": { get_resource: hadoop_network }
      "cidr": { get_param: cluster_subnet_cidr }
      "gateway_ip": { get_param: subnet_gateway_ip }
      "dns_nameservers": { get_param: subnet_dns_nameservers }
      "allocation_pools":
        - "start": { get_param: allocation_pool_start }
          "end": { get_param: allocation_pool_end }

  hadoop_port:
    type: OS::Neutron::Port
    properties:
      network: { get_resource: hadoop_network }
      fixed_ips:
        - subnet_id: { get_resource: hadoop_subnet }
      security_groups: [{ get_resource: hadoop_sec_group }]

  hadoop_master:
    type: OS::Nova::Server
    properties:
      name: { get_param: master_name }
      image: { get_param: os_image }
      flavor: { get_param: master_flavor }
      key_name: { get_param: master_ssh_key }
      networks:
        - port: { get_resource: hadoop_port }
      user_data:
        str_replace:
          template: |
            $master_bash.sh$
          params:
            $paramsslave$

$slaves$

  hadoop_ip:
    type: OS::Neutron::FloatingIP
    properties:
      floating_network: "public"

  hadoop_router:
    type: OS::Neutron::Router
    properties:
      external_gateway_info:
        network: "public"

  router_interface:
    type: OS::Neutron::RouterInterface
    properties:
      router_id: { get_resource: hadoop_router }
      subnet_id: { get_resource: hadoop_subnet }

  floating_ip_assoc:
    type: OS::Neutron::FloatingIPAssociation
    properties:
      floatingip_id: { get_resource: hadoop_ip }
      port_id: { get_resource: hadoop_port }

outputs:
  external_ip:
    description: The IP address of the deployed master node
    value: { get_attr: [ hadoop_ip, floating_ip_address ] }
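Once the `$...$` placeholders have been substituted by the SO, the rendered template can also be tested directly against Heat (a sketch using the python-heatclient CLI pinned above; the stack and file names are arbitrary):

```bash
# validate, then create the stack by hand, bypassing the SO
heat template-validate -f hadoop.yaml
heat stack-create hadoop-cluster -f hadoop.yaml
```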
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/app/hadoop/tmp</value>
    <description>A base for other temporary directories.</description>
  </property>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://$masternode$:54310</value>
    <description>The name of the default file system. A URI whose
    scheme and authority determine the FileSystem implementation. The
    uri's scheme determines the config property (fs.SCHEME.impl) naming
    the FileSystem implementation class. The uri's authority is used to
    determine the host, port, etc. for a filesystem.</description>
  </property>
  <property>
    <name>fs.trash.interval</name>
    <value>0</value>
    <description>disable server trash</description>
  </property>
</configuration>
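Once this file is rendered onto a node (with `$masternode$` replaced by the master's hostname), NameNode reachability can be spot-checked with the stock HDFS client (a sketch; `<masternode>` is a placeholder):

```bash
# list the filesystem root via the NameNode address from fs.defaultFS
hdfs dfs -ls hdfs://<masternode>:54310/
```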
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Set Hadoop-specific environment variables here.
# The only required environment variable is JAVA_HOME. All others are
# optional. When running a distributed configuration it is best to
# set JAVA_HOME in this file, so that it is correctly defined on
# remote nodes.
# The java implementation to use.
export JAVA_HOME=/usr/lib/java/jdk/
# The jsvc implementation to use. Jsvc is required to run secure datanodes
# that bind to privileged ports to provide authentication of data transfer
# protocol. Jsvc is not required if SASL is configured for authentication of
# data transfer protocol using non-privileged ports.
#export JSVC_HOME=${JSVC_HOME}
export HADOOP_CONF_DIR="/etc/hadoop"
# Extra Java CLASSPATH elements. Automatically insert capacity-scheduler.
for f in $HADOOP_HOME/contrib/capacity-scheduler/*.jar; do
  if [ "$HADOOP_CLASSPATH" ]; then
    export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f
  else
    export HADOOP_CLASSPATH=$f
  fi
done
# The maximum amount of heap to use, in MB. Default is 1000.
#export HADOOP_HEAPSIZE=
#export HADOOP_NAMENODE_INIT_HEAPSIZE=""
# Extra Java runtime options. Empty by default.
export HADOOP_OPTS="$HADOOP_OPTS -Djava.net.preferIPv4Stack=true"
# Command specific options appended to HADOOP_OPTS when specified
export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_NAMENODE_OPTS"
export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS"
export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS"
export HADOOP_NFS3_OPTS="$HADOOP_NFS3_OPTS"
export HADOOP_PORTMAP_OPTS="-Xmx512m $HADOOP_PORTMAP_OPTS"
# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
export HADOOP_CLIENT_OPTS="-Xmx512m $HADOOP_CLIENT_OPTS"
#HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData $HADOOP_JAVA_PLATFORM_OPTS"
# On secure datanodes, user to run the datanode as after dropping privileges.
# This **MUST** be uncommented to enable secure HDFS if using privileged ports
# to provide authentication of data transfer protocol. This **MUST NOT** be
# defined if SASL is configured for authentication of data transfer protocol
# using non-privileged ports.
export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER}
# Where log files are stored. $HADOOP_HOME/logs by default.
#export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER
# Where log files are stored in the secure data environment.
export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER}
###
# HDFS Mover specific parameters
###
# Specify the JVM options to be used when starting the HDFS Mover.
# These options will be appended to the options specified as HADOOP_OPTS
# and therefore may override any similar flags set in HADOOP_OPTS
#
# export HADOOP_MOVER_OPTS=""
###
# Advanced Users Only!
###
# The directory where pid files are stored. /tmp by default.
# NOTE: this should be set to a directory that can only be written to by
# the user that will run the hadoop daemons. Otherwise there is the
# potential for a symlink attack.
export HADOOP_PID_DIR=${HADOOP_PID_DIR}
export HADOOP_SECURE_DN_PID_DIR=${HADOOP_PID_DIR}
# A string representing this instance of hadoop. $USER by default.
export HADOOP_IDENT_STRING=$USER
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
  <property>
    <name>dfs.replication</name>
    <value>1</value>
    <description>Default block replication.
    The actual number of replications can be specified when the file is created.
    The default is used if replication is not specified at create time.
    </description>
  </property>
</configuration>
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
  <property>
    <name>mapred.job.tracker</name>
    <value>$masternode$:54311</value>
    <description>The host and port that the MapReduce job tracker runs
    at. If "local", then jobs are run in-process as a single map
    and reduce task.
    </description>
  </property>
</configuration>
-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEA5GwkfebkmogJrKTKEj0yhkpN/uVXeqWipnWUJnWxyXdGvRyi
zfO5mIHPKjlEb8ZIwhCjDWMLNndiEDtHu1hC0n5jLkJ/7VKywIhUwXxrDcW2q0Tv
lDLw70MKkIEWfeMK4xfxC8+K0E3vOzE7saUe/a24hsR6x5iRcRgIEtnr3M8H5gis
skteW61rDV2sQy8ogTd5lls/zvuG9yBDfxSh2EHIMvksrRnx0ARZLjiORE60YHgc
uKIRmc65+OE2IyuC9rSyST09K1H35fNYblVqKnIMJpBxE7Mw2ZuzmJHi71/YNNic
3oiq6gBRL2lsY8q3g5CxKijRfaMYt0n3V+9mKwIDAQABAoIBAAW64L93x0xJFVUL
XTZP6rCLtKEsR6F//jCGyoycbzVKlK+xxDO+ZW159CRvA1R6eODFYhG35lQbco5N
rmL4t9Rn7zz91g+x/2HtCZNA976EsyxgslHZZUnbOKyfUF0gEOnZc+aD8kt/aPzn
I0Q1jC146iQXUa0oGp3nqGwwo0YKr1arwj22EkluFm+LSuQiYgN3CQ+NTJhFPr/u
ERE4NZ+g3dUSbVDjlz2ohdc6yjAlby7hz+QzdDFqjF/Pjyjn+EKh1Qh/yl9gHLIk
An0cpVNCauQEt1eQWLelUFc39o/nBhOfIxzv/NYqnws4OkEZlWQz5QfbxnifP1Ac
Ptu/csECgYEA/ZiP8hjGwNQvLhKpOmSgqG/XUcs2YbFhmdVmQ08ujOoEfrXvcbqE
LmDNd4v7nD45ZIw/LsVENlCMtaheK+sVmgE8u1OrJQcvYylLxXsm9wJ2RUL4xECR
b5QjlfzWfM/BL//+tFgouUsxoBPHrXcAC9xo4LhGRRSOm2q8tLPmadMCgYEA5pZ8
8m3eTyAJyD+TDa0mxOs3lkL9mpnjk+DSIy3U0k8FAE+HLamRYhJxgl9+IEgqmAhj
ech9j8iFAhQajtFyiUEiD4LVikGlX3EtZK7M5wZjdijK2aC6UIhsSIDmuPx99adF
PTlHYCfsZrMaWm+ern05EAQiV2eREuNa9+WzQ0kCgYEAq945sFoaWZV0ZEVBa9HH
EGC8DTMsKAmPnDKEnBmN8vKnGTk0jl9aNhRlLCAy5jFFtF9Ycto+4JC2zGbPa/Rn
L3inME4EL5QvCYVzVOOiBMYmYcqBzn/0ESrU81HoC67Bv0Y/2Pnmn63WdCbzrRx5
a5B1g9dfyHGmNK6iNdW20wECgYB/e8nemmGjK1pdLNDXadiaXFUiLS1asSQCGPT0
4BzenTNtpYpd83beWsYIx1TK8jsiTNj6dp78xGbM7GJ2fVOukKUNIE+BHaQZbVAk
bwP34i9RH2JvCY6YgaQkPafRtD7Ldswv+h/9CHWJnhG+2CFJ5Q+MTEzUN/0cDTi4
/3SpiQKBgGimSFgax0XR1FwhmhwUaPnM3HBCJtjWpF2wjIqVf9jFO5TgsNEq5U3k
AAoZ9ID52KTIY9vhxzfpLS7VA+lAK6U9tlNuWNFakZlwpWcQthFftd/HZxV2qqyc
kcvM96T6Ch1alYeoyjzrTdGR+2MI6y68Mc+KFdcTha0g1SoHqDtH
-----END RSA PRIVATE KEY-----
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDkbCR95uSaiAmspMoSPTKGSk3+5Vd6paKmdZQmdbHJd0a9HKLN87mYgc8qOURvxkjCEKMNYws2d2IQO0e7WELSfmMuQn/tUrLAiFTBfGsNxbarRO+UMvDvQwqQgRZ94wrjF/ELz4rQTe87MTuxpR79rbiGxHrHmJFxGAgS2evczwfmCKyyS15brWsNXaxDLyiBN3mWWz/O+4b3IEN/FKHYQcgy+SytGfHQBFkuOI5ETrRgeBy4ohGZzrn44TYjK4L2tLJJPT0rUffl81huVWoqcgwmkHETszDZm7OYkeLvX9g02JzeiKrqAFEvaWxjyreDkLEqKNF9oxi3SfdX72Yr mesz@CLT-MOB-T-6253-2
#!/bin/bash
# disable IPv6 as Hadoop won't run on a system with it activated
echo "disabling IPv6" >> /home/ec2-user/deployment.log
echo -e "\nnet.ipv6.conf.all.disable_ipv6 = 1\nnet.ipv6.conf.default.disable_ipv6 = 1\nnet.ipv6.conf.lo.disable_ipv6 = 1" >> /etc/sysctl.conf
sysctl -p
# add group and user for hadoop and setup SSH for it
echo "adding user hduser:hadoop" >> /home/ec2-user/deployment.log
addgroup hadoop
useradd --gid hadoop --home /home/hduser --create-home --shell /bin/bash hduser
echo "hduser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
passwd --delete hduser
su hduser -c "mkdir /home/hduser/.ssh"
su hduser -c 'echo -e "$master.id_rsa$" > /home/hduser/.ssh/id_rsa'
su hduser -c 'echo -e "$master.id_rsa.pub$" > /home/hduser/.ssh/id_rsa.pub'
su hduser -c "cat /home/hduser/.ssh/id_rsa.pub >> /home/hduser/.ssh/authorized_keys"
su hduser -c 'echo -e "Host *\n StrictHostKeyChecking no\n UserKnownHostsFile=/dev/null" > /home/hduser/.ssh/config'
chmod 0600 /home/hduser/.ssh/*
# download Hadoop & Java on the master and install them (including setting the environment variables)
cd /root
echo "downloading hadoop..." >> /home/ec2-user/deployment.log
wget $hadoop_uri$
echo "downloading jdk..." >> /home/ec2-user/deployment.log
wget --no-cookies --no-check-certificate --header "Cookie: gpw_e24=http%3A%2F%2Fwww.oracle.com%2F; oraclelicense=accept-securebackup-cookie" "$jdk_uri$"
echo "setting up files for deployment on slaves..." >> /home/ec2-user/deployment.log
# move packages to hduser's home directory (access rights)
mkdir /home/hduser/archives
mv /root/{hadoop-2.7.1,jdk-8u60-linux-x64}.tar.gz /home/hduser/archives
cat - >> /root/bashrc.suffix <<'EOF'
export JAVA_HOME=/usr/lib/java/jdk
export PATH=$PATH:$JAVA_HOME/bin
export HADOOP_HOME=/usr/lib/hadoop/hadoop
export PATH=$PATH:$HADOOP_HOME/bin
EOF
# configure Hadoop
# first of all, let's create the config files for the slaves
mkdir /home/hduser/hadoopconf
mv /root/bashrc.suffix /home/hduser/hadoopconf
# create the replacement for the /etc/hosts file - note: the slaves must have the same names as configured in the Heat template!
echo -e "127.0.0.1\tlocalhost\n`/sbin/ifconfig eth0 | grep 'inet addr' | cut -d: -f2 | awk '{print $1}'` $masternode$" > /root/hosts.replacement
cat - >> /root/hosts.replacement <<'EOF'
$hostsfilecontent$
EOF
mv -f /root/hosts.replacement /home/hduser/hadoopconf
# create yarn-site.xml:
cat - > /home/hduser/hadoopconf/yarn-site.xml << 'EOF'
$yarn-site.xml$
EOF
# create core-site.xml:
cat - > /home/hduser/hadoopconf/core-site.xml << 'EOF'
$core-site.xml$
EOF
# create mapred-site.xml:
cat - >> /home/hduser/hadoopconf/mapred-site.xml << 'EOF'
$mapred-site.xml$
EOF
# create hdfs-site.xml (this is where the replication factor is set):
cat - >> /home/hduser/hadoopconf/hdfs-site.xml << 'EOF'
$hdfs-site.xml$
EOF
# create hadoop-env.sh:
cat - >> /home/hduser/hadoopconf/hadoop-env.sh << 'EOF'
$hadoop-env.sh$
EOF
# setup parallel ssh
apt-get install -y pssh
cat - > /home/hduser/hosts.lst << 'EOF'
127.0.0.1
$for_loop_slaves$
EOF
echo "copying hadoop and jdk to slaves" >> /home/ec2-user/deployment.log
su hduser -c "parallel-scp -h /home/hduser/hosts.lst /home/hduser/archives/{hadoop-2.7.1.tar.gz,jdk-8u60-linux-x64.tar.gz} /home/hduser"
echo "unpacking hadoop" >> /home/ec2-user/deployment.log
su hduser -c "parallel-ssh -t 200 -h /home/hduser/hosts.lst \"tar -xzf /home/hduser/hadoop-2.7.1.tar.gz\""
echo "unpacking jdk" >> /home/ec2-user/deployment.log
su hduser -c "parallel-ssh -t 200 -h /home/hduser/hosts.lst \"tar -xzf /home/hduser/jdk-8u60-linux-x64.tar.gz\""
echo "setting up both" >> /home/ec2-user/deployment.log
su hduser -c "parallel-ssh -h /home/hduser/hosts.lst \"sudo mkdir /usr/lib/hadoop\""
su hduser -c "parallel-ssh -h /home/hduser/hosts.lst \"sudo mv /home/hduser/hadoop-2.7.1 /usr/lib/hadoop\""
su hduser -c "parallel-ssh -h /home/hduser/hosts.lst \"sudo ln -s /usr/lib/hadoop/hadoop-2.7.1 /usr/lib/hadoop/hadoop\""
su hduser -c "parallel-ssh -h /home/hduser/hosts.lst \"sudo mv /usr/lib/hadoop/hadoop-2.7.1/etc/hadoop/ /etc/\""
su hduser -c "parallel-ssh -h /home/hduser/hosts.lst \"sudo mkdir -p /usr/lib/java\""
su hduser -c "parallel-ssh -h /home/hduser/hosts.lst \"sudo mv /home/hduser/jdk1.8.0_60/ /usr/lib/java/\""
su hduser -c "parallel-ssh -h /home/hduser/hosts.lst \"sudo ln -s /usr/lib/java/jdk1.8.0_60/ /usr/lib/java/jdk\""
su hduser -c "parallel-scp -h /home/hduser/hosts.lst /home/hduser/hadoopconf/bashrc.suffix /home/hduser"
su hduser -c "parallel-ssh -h /home/hduser/hosts.lst \"sudo sh -c \\\"cat /home/hduser/bashrc.suffix >> /etc/bash.bashrc\\\"\""
# prepare the working directories on the slaves; the config files are copied over below
su hduser -c "parallel-ssh -h /home/hduser/hosts.lst \"sudo mkdir -p /app/hadoop/tmp\""
su hduser -c "parallel-ssh -h /home/hduser/hosts.lst \"sudo chown hduser:hadoop /app/hadoop/tmp\""
su hduser -c "parallel-ssh -h /home/hduser/hosts.lst \"sudo chmod 750 /app/hadoop/tmp\""
su hduser -c "parallel-ssh -h /home/hduser/hosts.lst \"sudo chown -R hduser:hadoop /etc/hadoop\""
# the file has to be copied into the user directory as hduser doesn't have permissions to write into /etc/hadoop
echo "copying config files from master to slave..." >> /home/ec2-user/deployment.log
su hduser -c "parallel-scp -h /home/hduser/hosts.lst /home/hduser/hadoopconf/core-site.xml /home/hduser"
# move file to its final location (/etc/hadoop)
su hduser -c "parallel-ssh -h /home/hduser/hosts.lst \"sudo mv -f /home/hduser/core-site.xml /etc/hadoop\""
su hduser -c "parallel-scp -h /home/hduser/hosts.lst /home/hduser/hadoopconf/{{mapred,hdfs,yarn}-site.xml,hadoop-env.sh} /etc/hadoop"
su hduser -c "parallel-scp -h /home/hduser/hosts.lst /home/hduser/hadoopconf/hosts.replacement /home/hduser"
su hduser -c "parallel-ssh -h /home/hduser/hosts.lst \"sudo mv -f /home/hduser/hosts.replacement /etc/hosts\""
su hduser -c "parallel-ssh -h /home/hduser/hosts.lst \"ln -s /etc/hadoop /usr/lib/hadoop/hadoop-2.7.1/etc/hadoop\""
# set master and slave nodes
echo $masternode$ > /etc/hadoop/masters
cat - > /etc/hadoop/slaves << 'EOF'
$masternode$
$slavesfile$
EOF
source /etc/hadoop/hadoop-env.sh
su hduser -c "/usr/lib/hadoop/hadoop/bin/hdfs namenode -format"
su hduser -c "/usr/lib/hadoop/hadoop/sbin/start-dfs.sh"
su hduser -c "/usr/lib/hadoop/hadoop/sbin/start-yarn.sh"
echo "hadoop cluster ready" >> /home/ec2-user/deployment.log
{
  "service_type": "http://schemas.cloudcomplab.ch/occi/sm#haas",
  "service_description": "DISCO: Hadoop as a service",
  "service_attributes": {
    "icclab.haas.slave.number": "",
    "icclab.haas.ui": "immutable"
  },
  "service_endpoint": "http://haas.cloudcomplab.ch:8888/haas/",
  "depends_on": []
}
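Against a running SM, the manifest's endpoint can be probed via the standard OCCI query interface (a sketch; the `/-/` path is an assumption about the OCCI rendering, and $KID/$TENANT are set as in the testing section above):

```bash
# discover the registered HaaS service category
curl -v http://haas.cloudcomplab.ch:8888/haas/-/ \
     -H 'X-Auth-Token: '$KID \
     -H 'X-Tenant-Name: '$TENANT
```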
############### slave $slavenumber$ ################
  hadoop_slave_port_$slavenumber$:
    type: OS::Neutron::Port
    properties:
      name: hadoop_slave_port_$slavenumber$
      network: { get_resource: hadoop_network }
      fixed_ips:
        - subnet_id: { get_resource: hadoop_subnet }
      security_groups: [{ get_resource: hadoop_sec_group }]

  hadoop_slave_$slavenumber$:
    type: OS::Nova::Server
    properties:
      image: { get_param: os_image }
      flavor: { get_param: slave_flavor }
      name: { list_join: ["", [{ get_param: slave_name }, "$slavenumber$"]] }
      networks:
        - port: { get_resource: hadoop_slave_port_$slavenumber$ }
      user_data:
        str_replace:
          template: |
            #!/bin/bash
            addgroup hadoop
            useradd --gid hadoop --home /home/hduser --create-home --shell /bin/bash hduser
            # hduser is the only user that logs into this VM, so it needs passwordless sudo
            echo "hduser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
            passwd --delete hduser
            su hduser -c "mkdir /home/hduser/.ssh"
            su hduser -c 'echo -e "$master.id_rsa.pub$" > /home/hduser/.ssh/authorized_keys'
            chmod 0600 /home/hduser/.ssh/authorized_keys
            echo $info$
          params:
            $info$: "no info today"
############### end slave $slavenumber$ ##############
<?xml version="1.0"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<configuration>
  <!-- Site specific YARN configuration properties -->
  <property>
    <name>yarn.resourcemanager.resource-tracker.address</name>
    <value>$masternode$:8025</value>
    <description>Enter your ResourceManager hostname.</description>
  </property>
</configuration>
bonfire
puka
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Setuptools script.
"""
from setuptools import setup
setup(name='haas_so',
      version='0.1',
      description='HaaS SO',
      author='ZHAW',
      author_email='edmo@zhaw.ch',
      url='http://blog.zhaw.ch/icclab/',
      license='Apache 2.0'
      )
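The script is used in the usual setuptools way, mirroring the Dockerfile's install step (run from the repository root):

```bash
# install the SO package into the active environment
python setup.py install
```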