Commit 1227b9f7 authored by Balazs

current local state with volume attachment

parent abfa254e
......@@ -124,25 +124,15 @@ $floating_ip_resource$
external_gateway_info:
network: external-net
# the disposal problem could be solved with a depends_on: here - see bug https://bugs.launchpad.net/heat/+bug/1299259 ; see also https://review.openstack.org/#/c/115336/ or https://bugs.launchpad.net/murano/+bug/1356721
# https://translate.google.com/translate?sl=auto&tl=en&js=y&prev=_t&hl=en&ie=UTF-8&u=http%3A%2F%2Fhabrahabr.ru%2Fcompany%2Fselectel%2Fblog%2F247307%2F&edit-text=&act=url
router_interface:
type: OS::Neutron::RouterInterface
properties:
router_id: { get_resource: hadoop_router }
subnet_id: { get_resource: hadoop_subnet }
# software_volume:
# type: OS::Cinder::Volume
# properties:
# name: software_volume
# size: 6
# image: $image_id$
volume_attachment:
type: OS::Cinder::VolumeAttachment
properties:
volume_id: $image_id$
instance_uuid: { get_resource: hadoop_master }
mountpoint: /dev/vdb
$volume_attachment$
$floating_ip_assoc$
outputs:
......
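Note: the $volume_attachment$ and $floating_ip_assoc$ placeholders above are not Heat syntax; the service orchestrator fills them in by plain string replacement over the template text before deploying. A minimal sketch of that mechanism, with hypothetical fragment values (the real fragments come from the data files added in this commit):

# sketch only: the fragment strings below are hypothetical stand-ins
replace_dict = {
    "$volume_attachment$": "volume_attachment:\n  type: OS::Cinder::VolumeAttachment\n  ...",
    "$floating_ip_assoc$": "",   # stays empty when no floating IP is requested
}
cluster_template = getFileContent('cluster.yaml')   # hypothetical template file name
for key, value in replace_dict.items():
    cluster_template = cluster_template.replace(key, value)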
......@@ -12,7 +12,8 @@
[cluster]
# name of the whole cluster - at the moment, this value isn't used for anything
# default name of the whole cluster; a timestamp will be appended to make it
# unique
icclab.haas.cluster.name: distributedprocessing
# image files to take as base image for master and slaves; it should be a
......@@ -77,5 +78,10 @@ icclab.haas.cluster.homedir: /home/ec2-user
# debug settings - you can decide on your own whether to deploy the created
# template on OpenStack and for debugging purposes, you can also provide a path
# for saving the template locally on the machine where the SO is run
icclab.haas.debug.donotdeploy: False
icclab.haas.debug.savetemplatetolocalpath:
icclab.haas.debug.donotdeploy: True
icclab.haas.debug.savetemplatetolocalpath:
# should a new volume be created from the volume with the given ID? for
# debugging purposes, it's easier not to create a new volume, as doing so takes
# more time and puts unnecessary strain on the system.
icclab.haas.master.createvolumeforattachment: True
\ No newline at end of file
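For reference, these flag values are read as strings and turned into booleans by the SO with a simple membership test; a minimal sketch that mirrors the getAttr lines later in this commit:

def parse_flag(raw_value):
    # anything other than 'true' or '1' (case-insensitive) counts as False,
    # mirroring getAttr(...).lower() in ['true', '1'] in the SOE code below
    return raw_value.lower() in ['true', '1']

# e.g. parse_flag('True') -> True, parse_flag('0') -> False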
# the source_volid is based on the example at http://docs.openstack.org/developer/heat/template_guide/basic_resources.html
software_volume:
type: OS::Cinder::Volume
properties:
name: software_volume
source_volid: $image_id$
# the mountpoint is not reliable, as Linux decides where the volume actually shows up
volume_attachment:
type: OS::Cinder::VolumeAttachment
properties:
volume_id: { get_resource: software_volume }
instance_uuid: { get_resource: hadoop_master }
mountpoint: /dev/vdb
# the mountpoint is not reliable, as Linux decides where the volume actually shows up
volume_attachment:
type: OS::Cinder::VolumeAttachment
properties:
volume_id: $image_id$
instance_uuid: { get_resource: hadoop_master }
mountpoint: /dev/vdb
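The two fragments above are alternatives: the SO selects one of them based on icclab.haas.master.createvolumeforattachment and then substitutes $image_id$ with the configured volume ID, as the SOE changes below show. A minimal sketch of that selection, assuming getFileContent returns the text of the named data file:

# sketch of the fragment selection performed on the SOE's deploy path
if create_volume_for_attachment:
    fragment = getFileContent('volumeAttachmentWithCreatedVolume.yaml')
else:
    fragment = getFileContent('volumeAttachmentWithoutCreatedVolume.yaml')
fragment = fragment.replace("$image_id$", image_id)
cluster_template = cluster_template.replace("$volume_attachment$", fragment)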
......@@ -13,7 +13,7 @@
# limitations under the License.
import os
import uuid # for the stack name
from sdk.mcn import util
from sm.so import service_orchestrator
from sm.so.service_orchestrator import LOG
......@@ -29,15 +29,19 @@ class SOE(service_orchestrator.Execution):
self.tenant = tenant
self.hadoop_master = None
# the endpoint is the orchestration endpoint for the current tenant
self.deployer = util.get_deployer(self.token,
url_type='public',
tenant_name=self.tenant)
tenant_name=self.tenant,
region='ZH',
endpoint='http://lisa.cloudcomplab.ch:8004/v1/72582e3c823e4fddb9c4e334d21a4d72') #https://api.zhdk.cloud.switch.ch:8004/v1/eba341dd0c39482ba3f4f2dce2d48440')
LOG.info("token = "+self.token)
LOG.info("tenant = "+self.tenant)
# the instance variables have to be initialised - for more details, see
# defaultSettings.cfg
# data/defaultSettings.cfg
self.clusterName = None
self.masterImage = None
self.slaveImage = None
self.masterFlavor = None
......@@ -68,6 +72,8 @@ class SOE(service_orchestrator.Execution):
self.saveToLocalPath = None
self.noDeployment = None
self.createVolumeForAttachment = None
def design(self):
LOG.info("designing stack")
......@@ -119,7 +125,7 @@ class SOE(service_orchestrator.Execution):
self.slaveCount = int(getAttr('icclab.haas.slave.number'))
except:
pass
# clusterName = getAttr('icclab.haas.cluster.name')
self.clusterName = getAttr('icclab.haas.cluster.name')
self.masterImage = getAttr('icclab.haas.master.image')
self.slaveImage = getAttr('icclab.haas.slave.image')
self.masterFlavor = getAttr('icclab.haas.master.flavor')
......@@ -146,6 +152,8 @@ class SOE(service_orchestrator.Execution):
self.noDeployment = getAttr('icclab.haas.debug.donotdeploy').lower() in ['true','1']
self.saveToLocalPath = getAttr('icclab.haas.debug.savetemplatetolocalpath')
self.createVolumeForAttachment = getAttr('icclab.haas.master.createvolumeforattachment').lower() in ['true','1']
self.diskId = 'virtio-'+self.image_id[0:20]
masterSSHKeyEntry = ''
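The diskId built above ('virtio-' plus the first 20 characters of the volume ID) exists because, as the template comments note, the Cinder mountpoint is not reliable; on KVM/virtio guests the attached volume can instead be found under /dev/disk/by-id, where virtio disk serials are truncated to 20 characters. A minimal sketch of that lookup on the instance side, under that assumption:

import os

def find_attached_volume(volume_id):
    # matches self.diskId = 'virtio-' + self.image_id[0:20] above
    candidate = '/dev/disk/by-id/virtio-' + volume_id[0:20]
    if os.path.exists(candidate):
        return os.path.realpath(candidate)   # e.g. /dev/vdb
    return None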
......@@ -209,6 +217,11 @@ class SOE(service_orchestrator.Execution):
" public_key: " + self.SSHMasterPublicKey + "\n\n"
masterSSHKeyEntry = "{ get_resource: users_public_key }"
# if master has to act as a slave as well, set variable accordingly
masterasslave = ""
if self.slaveOnMaster==True:
masterasslave = self.master_name+"\n"
# set up the bash script for the master (write the replacements into a
# dictionary and apply them one by one)
replaceDict = { "$master.id_rsa$": master_id_rsa,
......@@ -218,6 +231,7 @@ class SOE(service_orchestrator.Execution):
"$mapred-site.xml$": mapred_site_xml,
"$hdfs-site.xml$": hdfs_site_xml,
"$hadoop-env.sh$": hadoop_env_sh,
"$masternodeasslave$": masterasslave,
"$slavesfile$": slavesFile,
"$hostsfilecontent$": hostFileContent,
"$forloopslaves$": forLoopSlaves,
......@@ -244,6 +258,12 @@ class SOE(service_orchestrator.Execution):
floatingIpAssoc = ""
externalIpOutput = ""
# create volume attachment part
if( self.createVolumeForAttachment ):
self.createVolumeForAttachment = getFileContent('volumeAttachmentWithCreatedVolume.yaml')
else:
self.createVolumeForAttachment = getFileContent('volumeAttachmentWithoutCreatedVolume.yaml')
# if a floating IP is to be set up for the master, the variables have to be set accordingly
if True == self.withFloatingIP:
ipid = self.floatingIpId
......@@ -268,10 +288,7 @@ class SOE(service_orchestrator.Execution):
floatingIpAssoc = floatingIpAssoc+self.floatingIpId
floatingIpAssoc += "\n port_id: { get_resource: hadoop_port }\n\n"
# if master has to act as a slave as well, set variable accordingly
masterasslave = ""
if self.slaveOnMaster==True:
masterasslave = self.master_name+"\n"
self.createVolumeForAttachment = self.createVolumeForAttachment.replace("$image_id$", self.image_id)
# the cluster's heat template will have to be configured
replaceDict = {"$master_bash.sh$": self.masterbash,
......@@ -280,7 +297,6 @@ class SOE(service_orchestrator.Execution):
"$master_image$": self.masterImage,
"$slave_image$": self.slaveImage,
"$masternode$": self.master_name,
"$masternodeasslave$": masterasslave,
"$slavenode$": self.slave_name,
"$master_flavor$": self.masterFlavor,
"$slave_flavor$": self.slaveFlavor,
......@@ -296,8 +312,9 @@ class SOE(service_orchestrator.Execution):
"$subnet_allocation_pool_end$":
self.subnet_allocation_pool_end,
"$subnet_dns_servers$": self.subnet_dns_servers,
"$image_id$": self.image_id
"$volume_attachment$": self.createVolumeForAttachment
}
for key, value in replaceDict.iteritems():
clusterTemplate = clusterTemplate.replace(key, value)
......@@ -316,8 +333,13 @@ class SOE(service_orchestrator.Execution):
# deploy the created template
if self.hadoop_master is None and not self.noDeployment:
self.hadoop_master = self.deployer.deploy(self.deployTemplate,
self.token)
self.token,
name=self.clusterName+"_"+str(uuid.uuid1()))
LOG.info('Hadoop stack ID: ' + self.hadoop_master)
def provision(self, attributes=None):
......
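A note on the stack name built in deploy() above: clusterName comes from icclab.haas.cluster.name, and uuid.uuid1() is time-based, which is what the "a timestamp will be added" comment in defaultSettings.cfg refers to. A minimal sketch of the naming, with a hypothetical helper name:

import uuid

def unique_stack_name(cluster_name):
    # uuid1 is time-based, so repeated deployments of the same cluster name
    # still get distinct Heat stack names
    return cluster_name + "_" + str(uuid.uuid1())

# e.g. unique_stack_name("distributedprocessing")
#      -> "distributedprocessing_1b4e28ba-2fa1-11d2-883f-0016d3cca427" (example value)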
......@@ -17,11 +17,11 @@ manifest=/Users/puenktli/Documents/ZHAW/SwitchGit/HaaS/bundle/data/service_manif
# This is the location where the service orchestrator bundle is located
# required; local file system path string
bundle_location=amnion/haas-so:latest
bundle_location=amnion/haas:latest
# This is the endpoint where the keystone service runs
# required; default: http://localhost:35357/v2.0; a URL string
design_uri=http://engines.switch.ch:35357/v2.0
design_uri=https://keystone.cloud.switch.ch:5000/v2.0
[service_manager_admin]
# This enables service registration with keystone
......
......@@ -30,7 +30,7 @@ r = requests.delete(host+'/orchestrator/default', headers=heads)
curl -v -X GET http://192.168.99.100:32771/-/ -H 'Accept: text/occi' -H 'X-Auth-Token: 73c0b22f89a44fb297ae4f1d98f7b6e4' -H 'X-Tenant-Name: mesz'
curl -v -X POST http://192.168.99.100:32771/haas/ -H 'Category: haas; scheme="http://schemas.cloudcomplab.ch/occi/sm#"; class="kind";' -H 'content-type: text/occi' -H 'X-Auth-Token: 73c0b22f89a44fb297ae4f1d98f7b6e4' -H 'X-Tenant-Name: mesz'
curl -v -X POST http://127.0.0.1:8888/haas/ -H 'Category: haas; scheme="http://schemas.cloudcomplab.ch/occi/sm#"; class="kind";' -H 'content-type: text/occi' -H 'X-Auth-Token: 4bc2b7087903461eb53f1dbbc477a7d8' -H 'X-Tenant-Name: mesz@zhaw.ch'
curl -v -X POST http://160.85.231.190:8888/haas/ -H 'Category: haas; scheme="http://schemas.hurtle.it/occi/sm#"; class="kind";' -H 'content-type: text/occi' -H 'x-tenant-name: YOUR_TENANT_NAME' -H 'x-auth-token: YOUR_KEYSTONE_TOKEN'
......
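The curl calls above can also be issued from Python with the requests library (the requests.delete line visible in the hunk header does the same for teardown); a minimal sketch of the POST that creates a haas instance, with placeholder credentials:

import requests

# placeholder token and tenant, the same values passed to curl via -H;
# the Location header of the response points at the created instance
headers = {
    'Category': 'haas; scheme="http://schemas.cloudcomplab.ch/occi/sm#"; class="kind";',
    'Content-Type': 'text/occi',
    'X-Auth-Token': 'YOUR_KEYSTONE_TOKEN',
    'X-Tenant-Name': 'YOUR_TENANT_NAME',
}
r = requests.post('http://127.0.0.1:8888/haas/', headers=headers)
print(r.status_code)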