Commit 7b60ecf7 authored by Balazs's avatar Balazs

added public ssh key within OpenStack - now, all slave nodes are certainly...

Added a public SSH key resource within OpenStack. All slave nodes are now guaranteed to be created before the master node. Note that logging in from outside must use the provided master.id_rsa key found in the bundle/data directory.
parent 671bccc1
# TODO: update external network name (currently "public") and master_flavour/slave_flavour
heat_template_version: 2014-10-16
parameters:
os_image:
master_image:
type: string
description: Which image should be the master's and slave's os setup with?
default: "$os_image$"
description: Which image should be the master's os setup with?
default: "$master_image$"
slave_image:
type: string
description: Which image should be the slave's os setup with?
default: "$slave_image$"
master_flavor:
type: string
description: Flavor of the master's instance
default: "c1.medium"
default: $master_flavor$
slave_flavor:
type: string
description: Flavor of the master's instance
default: "c1.medium"
default: $slave_flavor$
master_name:
type: string
......@@ -58,6 +62,11 @@ parameters:
default: $subnet_dns_servers$
resources:
sshpublickey:
properties:
name: ssh_cluster_pub_key
public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDkbCR95uSaiAmspMoSPTKGSk3+5Vd6paKmdZQmdbHJd0a9HKLN87mYgc8qOURvxkjCEKMNYws2d2IQO0e7WELSfmMuQn/tUrLAiFTBfGsNxbarRO+UMvDvQwqQgRZ94wrjF/ELz4rQTe87MTuxpR79rbiGxHrHmJFxGAgS2evczwfmCKyyS15brWsNXaxDLyiBN3mWWz/O+4b3IEN/FKHYQcgy+SytGfHQBFkuOI5ETrRgeBy4ohGZzrn44TYjK4L2tLJJPT0rUffl81huVWoqcgwmkHETszDZm7OYkeLvX9g02JzeiKrqAFEvaWxjyreDkLEqKNF9oxi3SfdX72Yr mesz@CLT-MOB-T-6253-2
type: OS::Nova::KeyPair
hadoop_sec_group:
type: OS::Neutron::SecurityGroup
......@@ -76,17 +85,16 @@ resources:
properties:
name: "hadoopNet"
# TODO: I put quotation marks here as heat was having problems deploying the template
"hadoop_subnet":
"type": "OS::Neutron::Subnet"
"properties":
"network": { get_resource: hadoop_network }
"cidr": { get_param: cluster_subnet_cidr }
"gateway_ip": { get_param: subnet_gateway_ip }
"dns_nameservers": { get_param: subnet_dns_nameservers }
"allocation_pools":
- "start": { get_param: allocation_pool_start }
"end": { get_param: allocation_pool_end }
hadoop_subnet:
type: OS::Neutron::Subnet
properties:
network: { get_resource: hadoop_network }
cidr: { get_param: cluster_subnet_cidr }
gateway_ip: { get_param: subnet_gateway_ip }
dns_nameservers: { get_param: subnet_dns_nameservers }
allocation_pools:
- start: { get_param: allocation_pool_start }
end: { get_param: allocation_pool_end }
hadoop_port:
type: OS::Neutron::Port
......@@ -100,9 +108,9 @@ resources:
type: OS::Nova::Server
properties:
name: { get_param: master_name }
image: { get_param: os_image }
image: { get_param: master_image }
flavor: { get_param: master_flavor }
key_name: { get_param: master_ssh_key }
key_name: { get_resource: sshpublickey }
networks:
- port: { get_resource: hadoop_port }
user_data:
......@@ -117,13 +125,13 @@ $slaves$
hadoop_ip:
type: OS::Neutron::FloatingIP
properties:
floating_network: "public"
floating_network: "external-net"
hadoop_router:
type: OS::Neutron::Router
properties:
external_gateway_info:
network: "public"
network: "external-net"
router_interface:
type: OS::Neutron::RouterInterface
......
......@@ -7,17 +7,17 @@ echo -e "\nnet.ipv6.conf.all.disable_ipv6 = 1\nnet.ipv6.conf.default.disable_ipv
sysctl -p
# add group and user for hadoop and setup SSH for it
echo "adding user hduser:hadoop" >> /home/ec2-user/deployment.log
addgroup hadoop
useradd --gid hadoop --home /home/hduser --create-home --shell /bin/bash hduser
echo "hduser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
passwd --delete hduser
su hduser -c "mkdir /home/hduser/.ssh"
su hduser -c 'echo -e "$master.id_rsa$" > /home/hduser/.ssh/id_rsa'
su hduser -c 'echo -e "$master.id_rsa.pub$" > /home/hduser/.ssh/id_rsa.pub'
su hduser -c "cat /home/hduser/.ssh/id_rsa.pub >> /home/hduser/.ssh/authorized_keys"
su hduser -c 'echo -e "Host *\n StrictHostKeyChecking no\n UserKnownHostsFile=/dev/null" > /home/hduser/.ssh/config'
chmod 0600 /home/hduser/.ssh/*
#echo "adding user ec2-user:hadoop" >> /home/ec2-user/deployment.log
#addgroup hadoop
#useradd --gid hadoop --home /home/ec2-user --create-home --shell /bin/bash ec2-user
#echo "ec2-user ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
#passwd --delete ec2-user
#su ec2-user -c "mkdir /home/ec2-user/.ssh"
su ec2-user -c 'echo -e "$master.id_rsa$" > /home/ec2-user/.ssh/id_rsa'
su ec2-user -c 'echo -e "$master.id_rsa.pub$" > /home/ec2-user/.ssh/id_rsa.pub'
#su ec2-user -c "cat /home/ec2-user/.ssh/id_rsa.pub >> /home/ec2-user/.ssh/authorized_keys"
su ec2-user -c 'echo -e "Host *\n StrictHostKeyChecking no\n UserKnownHostsFile=/dev/null" > /home/ec2-user/.ssh/config'
chmod 0600 /home/ec2-user/.ssh/*
# download Hadoop & Java on the master and install them (including setting the environment variables)
cd /root
......@@ -26,9 +26,9 @@ wget $hadoop_uri$
echo "downloading jdk..." >> /home/ec2-user/deployment.log
wget --no-cookies --no-check-certificate --header "Cookie: gpw_e24=http%3A%2F%2Fwww.oracle.com%2F; oraclelicense=accept-securebackup-cookie" "$jdk_uri$"
echo "setting up files for deployment on slaves..." >> /home/ec2-user/deployment.log
# move packages to hduser's home directory (access rights)
mkdir /home/hduser/archives
mv /root/{hadoop-2.7.1,jdk-8u60-linux-x64}.tar.gz /home/hduser/archives
# move packages to ec2-user's home directory (access rights)
mkdir /home/ec2-user/archives
mv /root/{hadoop-2.7.1,jdk-8u60-linux-x64}.tar.gz /home/ec2-user/archives
cat - >> /root/bashrc.suffix <<'EOF'
export JAVA_HOME=/usr/lib/java/jdk
export PATH=$PATH:$JAVA_HOME/bin
......@@ -38,87 +38,87 @@ EOF
# configure Hadoop
# first of all, let's create the config files for the slaves
mkdir /home/hduser/hadoopconf
mv /root/bashrc.suffix /home/hduser/hadoopconf
mkdir /home/ec2-user/hadoopconf
mv /root/bashrc.suffix /home/ec2-user/hadoopconf
# creating /etc/hosts file's replacement - don't forget: slaves need to have the same name as configured with Heat Template!!!
echo -e "127.0.0.1\tlocalhost\n`/sbin/ifconfig eth0 | grep 'inet addr' | cut -d: -f2 | awk '{print $1}'` $masternode$" > /root/hosts.replacement
cat - >> /root/hosts.replacement <<'EOF'
$hostsfilecontent$
EOF
mv -f /root/hosts.replacement /home/hduser/hadoopconf
mv -f /root/hosts.replacement /home/ec2-user/hadoopconf
# create yarn-site.xml:
cat - > /home/hduser/hadoopconf/yarn-site.xml << 'EOF'
cat - > /home/ec2-user/hadoopconf/yarn-site.xml << 'EOF'
$yarn-site.xml$
EOF
# create core-site.xml:
cat - > /home/hduser/hadoopconf/core-site.xml << 'EOF'
cat - > /home/ec2-user/hadoopconf/core-site.xml << 'EOF'
$core-site.xml$
EOF
# create mapred-site.xml:
cat - >> /home/hduser/hadoopconf/mapred-site.xml << 'EOF'
cat - >> /home/ec2-user/hadoopconf/mapred-site.xml << 'EOF'
$mapred-site.xml$
EOF
# create hdfs-site.xml: (here, replication factor has to be entered!!!)
cat - >> /home/hduser/hadoopconf/hdfs-site.xml << 'EOF'
cat - >> /home/ec2-user/hadoopconf/hdfs-site.xml << 'EOF'
$hdfs-site.xml$
EOF
# create hadoop-env.sh:
cat - >> /home/hduser/hadoopconf/hadoop-env.sh << 'EOF'
cat - >> /home/ec2-user/hadoopconf/hadoop-env.sh << 'EOF'
$hadoop-env.sh$
EOF
# setup parallel ssh
apt-get install -y pssh
cat - > /home/hduser/hosts.lst << 'EOF'
cat - > /home/ec2-user/hosts.lst << 'EOF'
127.0.0.1
$for_loop_slaves$
EOF
echo "copying hadoop and jdk to slaves" >> /home/ec2-user/deployment.log
su hduser -c "parallel-scp -h /home/hduser/hosts.lst /home/hduser/archives/{hadoop-2.7.1.tar.gz,jdk-8u60-linux-x64.tar.gz} /home/hduser"
su ec2-user -c "parallel-scp -h /home/ec2-user/hosts.lst /home/ec2-user/archives/{hadoop-2.7.1.tar.gz,jdk-8u60-linux-x64.tar.gz} /home/ec2-user"
echo "unpacking hadoop" >> /home/ec2-user/deployment.log
su hduser -c "parallel-ssh -t 200 -h /home/hduser/hosts.lst \"tar -xzf /home/hduser/hadoop-2.7.1.tar.gz\""
su ec2-user -c "parallel-ssh -t 200 -h /home/ec2-user/hosts.lst \"tar -xzf /home/ec2-user/hadoop-2.7.1.tar.gz\""
echo "unpacking jdk" >> /home/ec2-user/deployment.log
su hduser -c "parallel-ssh -t 200 -h /home/hduser/hosts.lst \"tar -xzf /home/hduser/jdk-8u60-linux-x64.tar.gz\""
su ec2-user -c "parallel-ssh -t 200 -h /home/ec2-user/hosts.lst \"tar -xzf /home/ec2-user/jdk-8u60-linux-x64.tar.gz\""
echo "setting up both" >> /home/ec2-user/deployment.log
su hduser -c "parallel-ssh -h /home/hduser/hosts.lst \"sudo mkdir /usr/lib/hadoop\""
su hduser -c "parallel-ssh -h /home/hduser/hosts.lst \"sudo mv /home/hduser/hadoop-2.7.1 /usr/lib/hadoop\""
su hduser -c "parallel-ssh -h /home/hduser/hosts.lst \"sudo ln -s /usr/lib/hadoop/hadoop-2.7.1 /usr/lib/hadoop/hadoop\""
su hduser -c "parallel-ssh -h /home/hduser/hosts.lst \"sudo mv /usr/lib/hadoop/hadoop-2.7.1/etc/hadoop/ /etc/\""
su hduser -c "parallel-ssh -h /home/hduser/hosts.lst \"sudo mkdir -p /usr/lib/java\""
su hduser -c "parallel-ssh -h /home/hduser/hosts.lst \"sudo mv /home/hduser/jdk1.8.0_60/ /usr/lib/java/\""
su hduser -c "parallel-ssh -h /home/hduser/hosts.lst \"sudo ln -s /usr/lib/java/jdk1.8.0_60/ /usr/lib/java/jdk\""
su hduser -c "parallel-scp -h /home/hduser/hosts.lst /home/hduser/hadoopconf/bashrc.suffix /home/hduser"
su hduser -c "parallel-ssh -h /home/hduser/hosts.lst \"sudo sh -c \\\"cat /home/hduser/bashrc.suffix >> /etc/bash.bashrc\\\"\""
su ec2-user -c "parallel-ssh -h /home/ec2-user/hosts.lst \"sudo mkdir /usr/lib/hadoop\""
su ec2-user -c "parallel-ssh -h /home/ec2-user/hosts.lst \"sudo mv /home/ec2-user/hadoop-2.7.1 /usr/lib/hadoop\""
su ec2-user -c "parallel-ssh -h /home/ec2-user/hosts.lst \"sudo ln -s /usr/lib/hadoop/hadoop-2.7.1 /usr/lib/hadoop/hadoop\""
su ec2-user -c "parallel-ssh -h /home/ec2-user/hosts.lst \"sudo mv /usr/lib/hadoop/hadoop-2.7.1/etc/hadoop/ /etc/\""
su ec2-user -c "parallel-ssh -h /home/ec2-user/hosts.lst \"sudo mkdir -p /usr/lib/java\""
su ec2-user -c "parallel-ssh -h /home/ec2-user/hosts.lst \"sudo mv /home/ec2-user/jdk1.8.0_60/ /usr/lib/java/\""
su ec2-user -c "parallel-ssh -h /home/ec2-user/hosts.lst \"sudo ln -s /usr/lib/java/jdk1.8.0_60/ /usr/lib/java/jdk\""
su ec2-user -c "parallel-scp -h /home/ec2-user/hosts.lst /home/ec2-user/hadoopconf/bashrc.suffix /home/ec2-user"
su ec2-user -c "parallel-ssh -h /home/ec2-user/hosts.lst \"sudo sh -c \\\"cat /home/ec2-user/bashrc.suffix >> /etc/bash.bashrc\\\"\""
# now, let's copy the files to the slaves
su hduser -c "parallel-ssh -h /home/hduser/hosts.lst \"sudo mkdir -p /app/hadoop/tmp\""
su hduser -c "parallel-ssh -h /home/hduser/hosts.lst \"sudo chown hduser:hadoop /app/hadoop/tmp\""
su hduser -c "parallel-ssh -h /home/hduser/hosts.lst \"sudo chmod 750 /app/hadoop/tmp\""
su hduser -c "parallel-ssh -h /home/hduser/hosts.lst \"sudo chown -R hduser:hadoop /etc/hadoop\""
su ec2-user -c "parallel-ssh -h /home/ec2-user/hosts.lst \"sudo mkdir -p /app/hadoop/tmp\""
su ec2-user -c "parallel-ssh -h /home/ec2-user/hosts.lst \"sudo chown ec2-user:ec2-user /app/hadoop/tmp\""
su ec2-user -c "parallel-ssh -h /home/ec2-user/hosts.lst \"sudo chmod 750 /app/hadoop/tmp\""
su ec2-user -c "parallel-ssh -h /home/ec2-user/hosts.lst \"sudo chown -R ec2-user:ec2-user /etc/hadoop\""
# the file has to be copied into the user directory as hduser doesn't have permissions to write into /etc/hadoop
# the file has to be copied into the user directory as ec2-user doesn't have permissions to write into /etc/hadoop
echo "copying config files from master to slave..." >> /home/ec2-user/deployment.log
su hduser -c "parallel-scp -h /home/hduser/hosts.lst /home/hduser/hadoopconf/core-site.xml /home/hduser"
su ec2-user -c "parallel-scp -h /home/ec2-user/hosts.lst /home/ec2-user/hadoopconf/core-site.xml /home/ec2-user"
# move file to its final location (/etc/hadoop)
su hduser -c "parallel-ssh -h /home/hduser/hosts.lst \"sudo mv -f /home/hduser/core-site.xml /etc/hadoop\""
su ec2-user -c "parallel-ssh -h /home/ec2-user/hosts.lst \"sudo mv -f /home/ec2-user/core-site.xml /etc/hadoop\""
su hduser -c "parallel-scp -h /home/hduser/hosts.lst /home/hduser/hadoopconf/{{mapred,hdfs,yarn}-site.xml,hadoop-env.sh} /etc/hadoop"
su ec2-user -c "parallel-scp -h /home/ec2-user/hosts.lst /home/ec2-user/hadoopconf/{{mapred,hdfs,yarn}-site.xml,hadoop-env.sh} /etc/hadoop"
su hduser -c "parallel-scp -h /home/hduser/hosts.lst /home/hduser/hadoopconf/hosts.replacement /home/hduser"
su hduser -c "parallel-ssh -h /home/hduser/hosts.lst \"sudo mv -f /home/hduser/hosts.replacement /etc/hosts\""
su ec2-user -c "parallel-scp -h /home/ec2-user/hosts.lst /home/ec2-user/hadoopconf/hosts.replacement /home/ec2-user"
su ec2-user -c "parallel-ssh -h /home/ec2-user/hosts.lst \"sudo mv -f /home/ec2-user/hosts.replacement /etc/hosts\""
su hduser -c "parallel-ssh -h /home/hduser/hosts.lst \"ln -s /etc/hadoop /usr/lib/hadoop/hadoop-2.7.1/etc/hadoop\""
su ec2-user -c "parallel-ssh -h /home/ec2-user/hosts.lst \"ln -s /etc/hadoop /usr/lib/hadoop/hadoop-2.7.1/etc/hadoop\""
# set master and slave nodes
......@@ -128,7 +128,7 @@ $masternode$
$slavesfile$
EOF
source /etc/hadoop/hadoop-env.sh
su hduser -c "/usr/lib/hadoop/hadoop/bin/hdfs namenode -format"
su hduser -c "/usr/lib/hadoop/hadoop/sbin/start-dfs.sh"
su hduser -c "/usr/lib/hadoop/hadoop/sbin/start-yarn.sh"
su ec2-user -c "/usr/lib/hadoop/hadoop/bin/hdfs namenode -format"
su ec2-user -c "/usr/lib/hadoop/hadoop/sbin/start-dfs.sh"
su ec2-user -c "/usr/lib/hadoop/hadoop/sbin/start-yarn.sh"
echo "hadoop cluster ready" >> /home/ec2-user/deployment.log
......@@ -2,8 +2,15 @@
"service_type": "http://schemas.cloudcomplab.ch/occi/sm#haas",
"service_description": "DISCO: Hadoop as a service",
"service_attributes": {
"icclab.haas.master.number": "",
"icclab.haas.slave.number": "",
"icclab.haas.ui": "immutable"
"icclab.haas.master.slaveonmaster": "",
"icclab.haas.cluster.name": "",
"icclab.haas.master.image": "",
"icclab.haas.slave.image": "",
"icclab.haas.master.sshkeyname": "",
"icclab.haas.master.publickey": "",
"icclab.haas.cluster.procframework": ""
},
"service_endpoint": "http://haas.cloudcomplab.ch:8888/haas/",
"depends_on": []
......
......@@ -11,26 +11,27 @@
hadoop_slave_$slavenumber$:
type: OS::Nova::Server
properties:
image: { get_param: os_image }
image: { get_param: slave_image }
flavor: { get_param: slave_flavor }
name: { list_join: ["", [{ get_param: slave_name },"$slavenumber$"]] }
key_name: { get_resource: sshpublickey }
networks:
- port: { get_resource: hadoop_slave_port_$slavenumber$ }
user_data:
str_replace:
template: |
#!/bin/bash
addgroup hadoop
useradd --gid hadoop --home /home/hduser --create-home --shell /bin/bash hduser
# as the only user to enter this VM is hduser, he needs to be able to access root functionality without a password
echo "hduser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
passwd --delete hduser
su hduser -c "mkdir /home/hduser/.ssh"
su hduser -c 'echo -e "$master.id_rsa.pub$" > /home/hduser/.ssh/authorized_keys'
chmod 0600 /home/hduser/.ssh/config
echo $info$
params:
$info$: "no info today"
# user_data:
# str_replace:
# template: |
# #!/bin/bash
# addgroup hadoop
# useradd --gid hadoop --home /home/hduser --create-home --shell /bin/bash hduser
# # as the only user to enter this VM is hduser, he needs to be able to access root functionality without a password
# echo "hduser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
# passwd --delete hduser
# su hduser -c "mkdir /home/hduser/.ssh"
# su hduser -c 'echo -e "$master.id_rsa.pub$" > /home/hduser/.ssh/authorized_keys'
# chmod 0600 /home/hduser/.ssh/config
# echo $info$
# params:
# $info$: "no info today"
############### end slave $slavenumber$ ##############
......@@ -13,7 +13,6 @@
# limitations under the License.
import os
import pprint
from sdk.mcn import util
from sm.so import service_orchestrator
......@@ -34,8 +33,7 @@ class SOE(service_orchestrator.Execution):
self.hadoop_master = None
self.deployer = util.get_deployer(self.token,
url_type='public',
tenant_name=self.tenant,
region='RegionOne')
tenant_name=self.tenant)
LOG.info("token = "+self.token)
LOG.info("tenant = "+self.tenant)
......@@ -55,19 +53,50 @@ class SOE(service_orchestrator.Execution):
LOG.info(attributes)
print( "deploying stack")
"""
deploy SICs on burns and ubern
"""
masterCount = 1
slaveCount = 1
slaveOnMaster = True
clusterName = "DistributedProcessing"
masterImage = "Ubuntu-Trusty-1404-7-10-2015"
slaveImage = "Ubuntu-Trusty-1404-7-10-2015"
sshKeyName = "mesz MNMBA"
sshPublicKey = ""
processingFramework = None
masterFlavor = "m1.small"
slaveFlavor = "m1.small"
if 'icclab.haas.master.number' in attributes:
masterCount = int(attributes['icclab.haas.master.number'])
if 'icclab.haas.slave.number' in attributes:
slaveCount = int(attributes['icclab.haas.slave.number'])
if 'icclab.haas.cluster.name' in attributes:
clusterName = attributes['icclab.haas.cluster.name']
if 'icclab.haas.master.image' in attributes:
masterImage = attributes['icclab.haas.master.image']
if 'icclab.haas.slave.image' in attributes:
slaveImage = attributes['icclab.haas.slave.image']
if 'icclab.haas.master.flavor' in attributes:
masterFlavor = attributes['icclab.haas.master.flavor']
if 'icclab.haas.slave.flavor' in attributes:
slaveFlavor = attributes['icclab.haas.slave.flavor']
if 'icclab.haas.master.slaveonmaster' in attributes:
slaveOnMaster = attributes['icclab.haas.master.slaveonmaster'] in ['true', '1']
if 'icclab.haas.master.sshkeyname' in attributes:
sshKeyName = attributes['icclab.haas.master.sshkeyname']
if 'icclab.haas.master.publickey' in attributes:
sshPublicKey = attributes['icclab.haas.master.publickey']
if 'icclab.haas.cluster.procframework' in attributes:
processingFramework = attributes['icclab.haas.cluster.procframework']
LOG.info('Calling deploy')
LOG.debug('Executing deployment logic')
# compile heat template and deploy it
slaveCount = 2
#os.environ['HADOOP_SLAVE_COUNT']
# TODO this will fail! :-) Fix
from sm.so.service_orchestrator import BUNDLE_DIR
rootFolder = "data/"
rootFolder = "/app/data/"
f = open(rootFolder + "cluster.yaml")
clusterTemplate = f.read()
......@@ -117,17 +146,14 @@ class SOE(service_orchestrator.Execution):
slavesFile = ""
hostsListFile = ""
# here, some params have to be changed
os_image = "Ubuntu Trusty 14.04 (SWITCHengines)"
master_name = "masternode"
slave_name = "slavenode"
ssh_key = "MNMBA"
subnet_cidr = "192.168.19.0/24"
subnet_gw_ip = "192.168.19.1"
subnet_allocation_pool_start = "192.168.19.2"
subnet_allocation_pool_end = "192.168.19.254"
subnet_dns_servers = '["8.8.8.8"]'
subnet_dns_servers = '["64.6.64.6","64.6.65.6"]'
slaveTemplate = slaveTemplate.replace("$master.id_rsa.pub$",master_id_rsa_pub)
for i in xrange(1,slaveCount+1):
......@@ -160,17 +186,33 @@ class SOE(service_orchestrator.Execution):
clusterTemplate = clusterTemplate.replace("$master_bash.sh$",masterbash)
clusterTemplate = clusterTemplate.replace("$paramsslave$",paramsSlave)
clusterTemplate = clusterTemplate.replace("$slaves$",slaves)
clusterTemplate = clusterTemplate.replace("$os_image$",os_image)
clusterTemplate = clusterTemplate.replace("$master_image$",masterImage)
clusterTemplate = clusterTemplate.replace("$slave_image$",slaveImage)
clusterTemplate = clusterTemplate.replace("$masternode$",master_name)
clusterTemplate = clusterTemplate.replace("$slavenode$",slave_name)
clusterTemplate = clusterTemplate.replace("$ssh_key$",ssh_key)
clusterTemplate = clusterTemplate.replace("$master_flavor$",masterFlavor)
clusterTemplate = clusterTemplate.replace("$slave_flavor$",slaveFlavor)
if sshPublicKey!="":
# create new public key
print "not empty"
else:
# only enter existing SSH key's name
print "empty"
clusterTemplate = clusterTemplate.replace("$ssh_key$",sshKeyName) # was ssh_key
clusterTemplate = clusterTemplate.replace("$subnet_cidr$",subnet_cidr)
clusterTemplate = clusterTemplate.replace("$subnet_gw_ip$",subnet_gw_ip)
clusterTemplate = clusterTemplate.replace("$subnet_allocation_pool_start$",subnet_allocation_pool_start)
clusterTemplate = clusterTemplate.replace("$subnet_allocation_pool_end$",subnet_allocation_pool_end)
clusterTemplate = clusterTemplate.replace("$subnet_dns_servers$",subnet_dns_servers)
# TODO will fail - fix
# f = open( "/Users/puenktli/Desktop/currentTemplate.yaml","w")
# f.write( clusterTemplate )
# f.close()
# LOG.debug(clusterTemplate)
LOG.debug(clusterTemplate)
self.deployTemplate = clusterTemplate
......
# Copyright 2014-2015 Zuercher Hochschule fuer Angewandte Wissenschaften
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
__author__ = 'andy'
# Copyright 2014-2015 Zuercher Hochschule fuer Angewandte Wissenschaften
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from occi.backend import KindBackend
from sm.so_manager import ServiceParameters
from sm.so_manager import AsychExe
from sm.so_manager import InitSO
from sm.so_manager import ActivateSO
from sm.so_manager import DeploySO
from sm.so_manager import ProvisionSO
from sm.so_manager import RetrieveSO
from sm.so_manager import UpdateSO
from sm.so_manager import DestroySO
__author__ = 'andy'
# service state model:
# - initialise
# - activate
# - deploy
# - provision -> This is THE terminal state
# - "active" (entered into runtime ops) This is not used
# - update
# - destroy
# - fail
class ServiceBackend(KindBackend):
    """OCCI kind backend providing the CRUD lifecycle for service
    orchestrator (SO) instances.

    Long-running lifecycle stages (activate/deploy/provision/destroy) are
    handed to a background AsychExe runner so requests return promptly.
    """

    def __init__(self, app):
        # registry shared with the hosting application
        self.registry = app.registry
        # service parameters; these are read from a location specified in
        # sm.cfg, service_manager::service_params
        self.srv_prms = ServiceParameters()

    def create(self, entity, extras):
        """Create the SO container, then run the remaining stages async."""
        super(ServiceBackend, self).create(entity, extras)
        extras['srv_prms'] = self.srv_prms
        # create the SO container synchronously
        InitSO(entity, extras).run()
        # remaining lifecycle stages run as background tasks
        # TODO this would be better using a workflow engine!
        background_tasks = [ActivateSO(entity, extras),
                            DeploySO(entity, extras),
                            ProvisionSO(entity, extras)]
        AsychExe(background_tasks, self.registry).start()

    def retrieve(self, entity, extras):
        """Refresh the entity's state from its SO."""
        super(ServiceBackend, self).retrieve(entity, extras)
        RetrieveSO(entity, extras).run()

    def delete(self, entity, extras):
        """Tear the SO down asynchronously."""
        super(ServiceBackend, self).delete(entity, extras)
        extras['srv_prms'] = self.srv_prms
        AsychExe([DestroySO(entity, extras)]).start()

    def update(self, old, new, extras):
        """Apply attribute updates from `new` onto the existing SO."""
        super(ServiceBackend, self).update(old, new, extras)
        extras['srv_prms'] = self.srv_prms
        UpdateSO(old, extras, new).run()

    def replace(self, old, new, extras):
        """Full entity replacement is not supported."""
        raise NotImplementedError()
# Copyright 2014-2015 Zuercher Hochschule fuer Angewandte Wissenschaften
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ConfigParser
import os
from optparse import OptionParser
__author__ = 'andy'
class DefaultConfigParser(ConfigParser.ConfigParser):
    """ConfigParser variant whose get() returns a caller-supplied default
    instead of raising when an option is missing.

    NOTE: NoSectionError is deliberately NOT caught and still propagates.
    """

    def get(self, section, option, default='', raw=False, vars=None):
        """Return section/option value, or `default` if the option is absent."""
        try:
            return ConfigParser.ConfigParser.get(self, section, option, raw, vars)
        except ConfigParser.NoOptionError:
            return default
def read():
    """Locate and load the service manager configuration.

    Resolution order for the config file path: the SM_CONFIG_PATH
    environment variable first, then the -c/--config-file option;
    if neither is provided, exit via parser.error().

    :return: tuple of (DefaultConfigParser instance, config file path).
    """
    parser = OptionParser(usage="Usage: %prog options. See %prog -h for options.")
    parser.add_option("-c", "--config-file",
                      action="store",
                      type="string",
                      dest="config_file_path",
                      help="Path to the service manager configuration file.")
    opts, _ = parser.parse_args()
    config_file_path = ''
    # TODO add better default heuristics
    # The environment variable takes precedence over the CLI flag.
    if 'SM_CONFIG_PATH' in os.environ:
        config_file_path = os.getenv('SM_CONFIG_PATH')
    elif opts.config_file_path:
        config_file_path = opts.config_file_path
    else:
        # prints the message and exits the process
        parser.error("SM: Wrong number of command line arguments.")
    config = DefaultConfigParser()
    config.read(config_file_path)
    return config, config_file_path
CONFIG, CONFIG_PATH = read()
# Copyright 2014-2015 Zuercher Hochschule fuer Angewandte Wissenschaften
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import graypy
from sm.config import CONFIG
__author__ = 'andy'
# XXX this will not work inside of OpenShift - needs to be modded
def config_logger(log_level=logging.DEBUG):
    """Configure and return the module logger.

    Applies a basic console format, then attaches an optional file handler
    and an optional Graylog (GELF) handler based on the [general] section
    of CONFIG.

    :param log_level: logging level for the returned logger
                      (default: logging.DEBUG).
    :return: a configured logging.Logger instance.
    """
    # BUG FIX: the basicConfig keyword is 'level', not 'log_level'.
    # The previous 'log_level=' kwarg was silently ignored on Python 2
    # and raises ValueError on Python 3.
    logging.basicConfig(format='%(levelname)s %(asctime)s: \t%(message)s',
                        datefmt='%m/%d/%Y %I:%M:%S %p',
                        level=log_level)
    logger = logging.getLogger(__name__)
    logger.setLevel(log_level)
    # Optional file handler, enabled when general/log_file is non-empty.
    log_file = CONFIG.get('general', 'log_file', '')
    if log_file != '':
        hdlr = logging.FileHandler(log_file)
        formatter = logging.Formatter(fmt='%(levelname)s %(asctime)s: %(message)s',
                                      datefmt='%m/%d/%Y %I:%M:%S %p')
        hdlr.setFormatter(formatter)
        logger.addHandler(hdlr)
    # Optional Graylog handler, enabled when both API host and port are set.
    graylog_api = CONFIG.get('general', 'graylog_api', '')
    if graylog_api != '' and CONFIG.get('general', 'graylog_port', '') != '':
        gray_handler = graypy.GELFHandler(graylog_api,
                                          CONFIG.getint('general', 'graylog_port'))
        logger.addHandler(gray_handler)
    return logger
LOG = config_logger()
# Copyright 2014-2015 Zuercher Hochschule fuer Angewandte Wissenschaften
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from pymongo.son_manipulator import SONManipulator
__author__ = 'florian'
class KeyTransform(SONManipulator):
"""Transforms keys going to database and restores them coming out.
This allows keys with dots in them to be used (but does break searching on
them unless the find command also uses the transform).
Example & test:
# To allow `.` (dots) in keys
import pymongo
client = pymongo.MongoClient("mongodb://localhost")
db = client['delete_me']
db.add_son_manipulator(KeyTransform(".", "_dot_"))
db['mycol'].remove()
db['mycol'].update({'_id': 1}, {'127.0.0.1': 'localhost'}, upsert=True,
manipulate=True)
print db['mycol'].find().next()
print db['mycol'].find({'127_dot_0_dot_0_dot_1': 'localhost'}).next()
Note: transformation could be easily extended to be more complex.
"""
def __init__(self, replace, replacement):
    """Store the token to strip and its database-safe replacement."""
    # e.g. replace=".", replacement="_dot_" to permit dotted keys in MongoDB
    self.replace = replace
    self.replacement = replacement
def transform_key(self, key):
    """Return `key` with every occurrence of the reserved token swapped
    for its database-safe replacement (saving direction)."""
    safe_key = key.replace(self.replace, self.replacement)
    return safe_key
def revert_key(self, key):
    """Undo transform_key: map the replacement token back to the
    original reserved token (loading direction)."""
    restored = key.replace(self.replacement, self.replace)
    return restored
def transform_incoming(self, son, collection):
    """Recursively replace all keys that need transforming."""
    # NOTE(review): mutates `son` in place while iterating; this relies on
    # items() returning a materialised list (Python 2 semantics) — under
    # Python 3 popping during iteration would raise RuntimeError. Confirm
    # the target runtime before porting.
    for (key, value) in son.items():
        if self.replace in key:
            if isinstance(value, dict):
                # pop removes the unsafe key, then the recursively
                # transformed sub-document is re-inserted under the safe key
                son[self.transform_key(key)] = self.transform_incoming(
                    son.pop(key), collection)
            else:
                son[self.transform_key(key)] = son.pop(key)
        elif isinstance(value, dict):  # recurse into sub-docs
            son[key] = self.transform_incoming(value, collection)
    return son
def transform_outgoing(self, son, collection):
"""Recursively restore all transformed keys."""
for (key, value) in son.items():
if self.replacement in key:
if isinstance(value, dict):
son[self.revert_key(key)] = self.transform_outgoing(