Commit 224fdec7 authored by Balazs

- changes to the parameter handling within the Service Orchestrator

- installation of software from dedicated Cinder Volume
- restructuring / refactoring
parent 4cf91e97
@@ -4,12 +4,12 @@ parameters:
master_image:
type: string
description: Which image should be the master's os setup with?
default: "$master_image$"
default: $master_image$
slave_image:
type: string
description: Which image should be the slave's os setup with?
default: "$slave_image$"
default: $slave_image$
master_flavor:
type: string
@@ -31,30 +31,25 @@ parameters:
description: slave's name (the index will be appended at the end)
default: $slavenode$
master_ssh_key:
type: string
description: name of public key of openstack user for master node
default: "$ssh_key$"
cluster_subnet_cidr:
type: string
description: CIDR of subnet that cluster is going to use
default: "$subnet_cidr$"
default: $subnet_cidr$
subnet_gateway_ip:
type: string
description: subnet's gateway's IP
default: "$subnet_gw_ip$"
default: $subnet_gw_ip$
allocation_pool_start:
type: string
description: allocation pool's starting IP address
default: "$subnet_allocation_pool_start$"
default: $subnet_allocation_pool_start$
allocation_pool_end:
type: string
description: allocation pool's last IP address
default: "$subnet_allocation_pool_end$"
default: $subnet_allocation_pool_end$
subnet_dns_nameservers:
type: comma_delimited_list
@@ -62,7 +57,7 @@ parameters:
default: $subnet_dns_servers$
resources:
sshpublickey:
$users_ssh_public_key$ sshpublickey:
properties:
name: ssh_cluster_pub_key
public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDkbCR95uSaiAmspMoSPTKGSk3+5Vd6paKmdZQmdbHJd0a9HKLN87mYgc8qOURvxkjCEKMNYws2d2IQO0e7WELSfmMuQn/tUrLAiFTBfGsNxbarRO+UMvDvQwqQgRZ94wrjF/ELz4rQTe87MTuxpR79rbiGxHrHmJFxGAgS2evczwfmCKyyS15brWsNXaxDLyiBN3mWWz/O+4b3IEN/FKHYQcgy+SytGfHQBFkuOI5ETrRgeBy4ohGZzrn44TYjK4L2tLJJPT0rUffl81huVWoqcgwmkHETszDZm7OYkeLvX9g02JzeiKrqAFEvaWxjyreDkLEqKNF9oxi3SfdX72Yr mesz@CLT-MOB-T-6253-2
@@ -83,7 +78,7 @@ resources:
hadoop_network:
type: OS::Neutron::Net
properties:
name: "hadoopNet"
name: hadoopNet
hadoop_subnet:
type: OS::Neutron::Subnet
@@ -110,7 +105,7 @@ resources:
name: { get_param: master_name }
image: { get_param: master_image }
flavor: { get_param: master_flavor }
key_name: { get_resource: sshpublickey }
key_name: $master_ssh_key_entry$
networks:
- port: { get_resource: hadoop_port }
user_data:
@@ -122,16 +117,12 @@ $paramsslave$
$slaves$
hadoop_ip:
type: OS::Neutron::FloatingIP
properties:
floating_network: "external-net"
$floating_ip_resource$
hadoop_router:
type: OS::Neutron::Router
properties:
external_gateway_info:
network: "external-net"
network: external-net
router_interface:
type: OS::Neutron::RouterInterface
@@ -139,13 +130,20 @@ $slaves$
router_id: { get_resource: hadoop_router }
subnet_id: { get_resource: hadoop_subnet }
floating_ip_assoc:
type: OS::Neutron::FloatingIPAssociation
# software_volume:
# type: OS::Cinder::Volume
# properties:
# name: software_volume
# size: 6
# image: $image_id$
volume_attachment:
type: OS::Cinder::VolumeAttachment
properties:
floatingip_id: { get_resource: hadoop_ip }
port_id: { get_resource: hadoop_port }
volume_id: $image_id$
instance_uuid: { get_resource: hadoop_master }
mountpoint: /dev/vdb
$floating_ip_assoc$
outputs:
external_ip:
description: The IP address of the deployed master node
value: { get_attr: [ hadoop_ip, floating_ip_address ] }
$external_ip_output$
\ No newline at end of file
# configuration file for service orchestrator (currently just hadoop as a
# service)
# Everything has to be within the section "cluster" because of the SO's
# implementation. All data can also be given at instantiation. If done so, that
# data will supersede the data given in this file. On the other hand, if the
# required data is neither passed during instantiation nor in this file, a
# default value will be set by the SO which might not be according to the
# user's needs.
# NOTE: there is an entry (icclab.haas.rootfolder) which cannot be included in
# this file as it is also needed in order to find this very file. It has to be
# delivered to the SO as an additional attribute with every deployment call!
[cluster]
# name of the whole cluster - at the moment, this value isn't used for anything
icclab.haas.cluster.name: distributedprocessing
# image files to take as base image for master and slaves; it should be a
# Debian-based distribution with the user ec2-user and passwordless sudo execution
icclab.haas.master.image: Ubuntu-Trusty-1404-7-10-2015
icclab.haas.slave.image: Ubuntu-Trusty-1404-7-10-2015
# Name of the SSH public key registered in keystone; if a key name is given
# without a public key, this key name will be included in the master - in this
# case, the key has to be registered in keystone already. If a key name is
# given together with a public key, a key with the given name and public key
# will be inserted into keystone (and the master). If neither a key name nor a
# public key is given, the public key from the file master.id_rsa.pub will be
# inserted into the master.
icclab.haas.master.sshkeyname:
icclab.haas.master.publickey:
# flavor for master / slave
icclab.haas.master.flavor: m1.small
icclab.haas.slave.flavor: m1.small
# number of masters and slaves to be configured (certain frameworks possibly
# don't allow multiple masters), plus the decision whether a slave node should
# be started on the master node as well
icclab.haas.master.number: 1
icclab.haas.slave.number: 2
icclab.haas.master.slaveonmaster: True
# should a floating IP be created for the cluster?
icclab.haas.master.withfloatingip: True
icclab.haas.master.attachfloatingipwithid: 034f34b7-5ff5-4109-9e8b-a9d919e7ff39
# name for the master(s) and slave(s)
icclab.haas.master.name: masternode
icclab.haas.slave.name: slavenode
# network configuration for the subnet
icclab.haas.network.subnet.cidr: 192.168.19.0/24
icclab.haas.network.gw.ip: 192.168.19.1
icclab.haas.network.subnet.allocpool.start: 192.168.19.2
icclab.haas.network.subnet.allocpool.end: 192.168.19.254
icclab.haas.network.dnsservers: ["64.6.64.6","64.6.65.6"]
# id of the nova image containing the directory structure for the software to
# be installed (can be retrieved with `nova image-list` or from Horizon)
icclab.haas.master.imageid: 8d92eb3d-2347-4177-939d-4f0f8537d31a
# method for transferring the files to the slaves of the cluster: either
# transfer them in packed form and unpack them on the slaves, or transfer them
# in unpacked form; this option is the name of a defined bash function.
# Currently, the two options transferUnpackedFiles and transferFirstUnpackLater
# are possible.
icclab.haas.master.transfermethod: transferUnpackedFiles
# some information about the Linux user has to be provided - this user has to
# be present on all of the deployed machines and it needs to have passwordless
# sudo access
icclab.haas.cluster.username: ec2-user
icclab.haas.cluster.usergroup: ec2-user
icclab.haas.cluster.homedir: /home/ec2-user
# debug settings - you can decide on your own whether to deploy the created
# template on OpenStack; for debugging purposes, you can also provide a path
# for saving the template locally on the machine where the SO is run
icclab.haas.debug.donotdeploy: False
icclab.haas.debug.savetemplatetolocalpath:
@@ -10,7 +10,25 @@
"icclab.haas.slave.image": "",
"icclab.haas.master.sshkeyname": "",
"icclab.haas.master.publickey": "",
"icclab.haas.cluster.procframework": ""
"icclab.haas.cluster.procframework": "",
"icclab.haas.master.flavor": "",
"icclab.haas.slave.flavor": "",
"icclab.haas.master.withfloatingip": "",
"icclab.haas.master.attachfloatingipwithid": "",
"icclab.haas.master.name": "",
"icclab.haas.slave.name": "",
"icclab.haas.network.subnet.cidr": "",
"icclab.haas.network.gw.ip": "",
"icclab.haas.network.subnet.allocpool.start": "",
"icclab.haas.network.subnet.allocpool.end": "",
"icclab.haas.network.dnsservers": "",
"icclab.haas.master.imageid": "",
"icclab.haas.master.transfermethod": "",
"icclab.haas.cluster.username": "",
"icclab.haas.cluster.usergroup": "",
"icclab.haas.cluster.homedir": "",
"icclab.haas.debug.donotdeploy": "",
"icclab.haas.debug.savetemplatetolocalpath": ""
},
"service_endpoint": "http://haas.cloudcomplab.ch:8888/haas/",
"depends_on": []
@@ -17,21 +17,6 @@
key_name: { get_resource: sshpublickey }
networks:
- port: { get_resource: hadoop_slave_port_$slavenumber$ }
# user_data:
# str_replace:
# template: |
# #!/bin/bash
# addgroup hadoop
# useradd --gid hadoop --home /home/hduser --create-home --shell /bin/bash hduser
# # as the only user to enter this VM is hduser, he needs to be able to access root functionality without a password
# echo "hduser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
# passwd --delete hduser
# su hduser -c "mkdir /home/hduser/.ssh"
# su hduser -c 'echo -e "$master.id_rsa.pub$" > /home/hduser/.ssh/authorized_keys'
# chmod 0600 /home/hduser/.ssh/config
# echo $info$
# params:
# $info$: "no info today"
############### end slave $slavenumber$ ##############
# Configuration
The system has to be configured before use. First of all, the data directory contains a file defaultSettings.cfg which holds the default settings for the distributed computing cluster, used whenever they are not provided by the user.
## Software Volume
In order to make deployment faster, all software that is to be installed has to be saved on a Cinder volume. This way, it doesn't have to be downloaded from some public server. To achieve this, a Cinder volume has to be created with all the necessary software saved on it, either in compressed or uncompressed form (depending on how you want to deploy the cluster). The directory structure that has to be followed on this volume is the following:
![software volume](softwarevolume.png)
The subdirectories always contain the unpacked files. The packed files can be downloaded from the following addresses:
http://mirror.switch.ch/mirror/apache/dist/hadoop/common/hadoop-2.7.1/hadoop-2.7.1.tar.gz
http://download.oracle.com/otn-pub/java/jdk/8u60-b27/jdk-8u60-linux-x64.tar.gz (cannot be downloaded directly)
https://github.com/jcmcken/parallel-ssh (master zip file; the renamed pssh.zip isn't actually used directly, but it is unpacked into the pssh subdirectory - have a look at the master_bash.sh file to see the exact structure expected within that subdirectory; the actual pssh/pscp binaries are under pssh/pssh/bin)
As soon as the cluster has been set up, the volume is unmounted and can be used for other clusters as well (in a later version, a new volume will be created for each individual cluster).
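One possible way to prepare such a volume is to create it with the OpenStack CLI, attach it to a temporary helper instance, create a filesystem and copy the packages onto it. The following is only a minimal sketch: the volume name, size, helper instance name and mount point are examples, not values prescribed by the SO.

```bash
# create an empty Cinder volume (name and size are examples)
openstack volume create --size 6 hadoop-software

# attach it to a running helper instance as /dev/vdb
openstack server add volume helper-vm hadoop-software --device /dev/vdb

# on the helper instance: create a filesystem, mount it and copy the software
sudo mkfs.ext4 /dev/vdb
sudo mkdir -p /mnt/software
sudo mount /dev/vdb /mnt/software
cd /mnt/software
sudo wget http://mirror.switch.ch/mirror/apache/dist/hadoop/common/hadoop-2.7.1/hadoop-2.7.1.tar.gz
sudo tar xzf hadoop-2.7.1.tar.gz   # keep both the archive and the unpacked directory
# copy jdk-8u60-linux-x64.tar.gz (downloaded manually from Oracle) and pssh.zip
# here as well and unpack them into their respective subdirectories
cd / && sudo umount /mnt/software

# detach the volume again and note its ID for the orchestrator configuration
openstack server remove volume helper-vm hadoop-software
openstack volume show hadoop-software -f value -c id
```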
# Run Service Manager and instantiate Service Orchestrator Instances
Clone the necessary Git repositories:
@@ -75,4 +94,5 @@ The easiest is to run the service orchestrator locally by starting the application
If a new error occurs after deploying the service orchestrator on the Cloud Controller, the log messages can be seen by connecting with the OpenShift CLI client and entering `oc logs <pod_name>`.
If this isn't helping either, a terminal within the pod can be started by `oc rsh <pod_name> /bin/sh` as there is no bash shell in this pod.
\ No newline at end of file
If this isn't helping either, a terminal within the pod can be started by `oc rsh <pod_name> /bin/sh` as there is no bash shell in this pod.
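For illustration, a typical debugging session on the Cloud Controller could look like the following; the pod name is only an example:

```bash
# list the pods to find the one running the service orchestrator
oc get pods

# follow its log output
oc logs -f so-pod-1-abcde

# open a shell inside the pod (sh, since there is no bash in this image)
oc rsh so-pod-1-abcde /bin/sh
```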