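# Tests for moto's mocked EMR backend, exercising the legacy boto (boto2) EMR
# client via the deprecated @mock_emr_deprecated decorator: run_jobflow,
# describe/list calls, instance groups, bootstrap actions, steps, and tags.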

from __future__ import unicode_literals
import time
from datetime import datetime

import boto
import pytz
from boto.emr.bootstrap_action import BootstrapAction
from boto.emr.instance_group import InstanceGroup
from boto.emr.step import StreamingStep
import sure  # noqa

from moto import mock_emr_deprecated
from tests.helpers import requires_boto_gte

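# Baseline keyword arguments for run_jobflow; individual tests copy and
# extend this dict rather than repeating the common settings.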
run_jobflow_args = dict(
    job_flow_role="EMR_EC2_DefaultRole",
    keep_alive=True,
    log_uri="s3://some_bucket/jobflow_logs",
    master_instance_type="c1.medium",
    name="My jobflow",
    num_instances=2,
    service_role="EMR_DefaultRole",
    slave_instance_type="c1.medium",
)

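# Shared instance-group fixture: one ON_DEMAND master group, one ON_DEMAND
# core group, and two SPOT task groups (with bid prices).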
input_instance_groups = [
    InstanceGroup(1, "MASTER", "c1.medium", "ON_DEMAND", "master"),
    InstanceGroup(3, "CORE", "c1.medium", "ON_DEMAND", "core"),
    InstanceGroup(6, "TASK", "c1.large", "SPOT", "task-1", "0.07"),
    InstanceGroup(10, "TASK", "c1.xlarge", "SPOT", "task-2", "0.05"),
]


@mock_emr_deprecated
def test_describe_cluster():
    conn = boto.connect_emr()
    args = run_jobflow_args.copy()
    args.update(
        dict(
            api_params={
                "Applications.member.1.Name": "Spark",
                "Applications.member.1.Version": "2.4.2",
                "Configurations.member.1.Classification": "yarn-site",
                "Configurations.member.1.Properties.entry.1.key": "someproperty",
                "Configurations.member.1.Properties.entry.1.value": "somevalue",
                "Configurations.member.1.Properties.entry.2.key": "someotherproperty",
                "Configurations.member.1.Properties.entry.2.value": "someothervalue",
                "Instances.EmrManagedMasterSecurityGroup": "master-security-group",
                "Instances.Ec2SubnetId": "subnet-8be41cec",
            },
            availability_zone="us-east-2b",
            ec2_keyname="mykey",
            job_flow_role="EMR_EC2_DefaultRole",
            keep_alive=False,
            log_uri="s3://some_bucket/jobflow_logs",
            name="My jobflow",
            service_role="EMR_DefaultRole",
            visible_to_all_users=True,
        )
    )
    cluster_id = conn.run_jobflow(**args)

    input_tags = {"tag1": "val1", "tag2": "val2"}
    conn.add_tags(cluster_id, input_tags)

    cluster = conn.describe_cluster(cluster_id)
    cluster.applications[0].name.should.equal("Spark")
    cluster.applications[0].version.should.equal("2.4.2")
    cluster.autoterminate.should.equal("true")

    # configurations appear not to be supplied as attributes?

    attrs = cluster.ec2instanceattributes
    # AdditionalMasterSecurityGroups
    # AdditionalSlaveSecurityGroups
    attrs.ec2availabilityzone.should.equal(args["availability_zone"])
    attrs.ec2keyname.should.equal(args["ec2_keyname"])
    attrs.ec2subnetid.should.equal(args["api_params"]["Instances.Ec2SubnetId"])
    # EmrManagedMasterSecurityGroups
    # EmrManagedSlaveSecurityGroups
    attrs.iaminstanceprofile.should.equal(args["job_flow_role"])
    # ServiceAccessSecurityGroup

    cluster.id.should.equal(cluster_id)
    cluster.loguri.should.equal(args["log_uri"])
    cluster.masterpublicdnsname.should.be.a(str)
    cluster.name.should.equal(args["name"])
    int(cluster.normalizedinstancehours).should.equal(0)
    # cluster.release_label
    cluster.shouldnt.have.property("requestedamiversion")
    cluster.runningamiversion.should.equal("1.0.0")
    # cluster.securityconfiguration
    cluster.servicerole.should.equal(args["service_role"])

    cluster.status.state.should.equal("TERMINATED")
    cluster.status.statechangereason.message.should.be.a(str)
    cluster.status.statechangereason.code.should.be.a(str)
    cluster.status.timeline.creationdatetime.should.be.a(str)
    # cluster.status.timeline.enddatetime.should.be.a(str)
    # cluster.status.timeline.readydatetime.should.be.a(str)

    dict((item.key, item.value) for item in cluster.tags).should.equal(input_tags)

    cluster.terminationprotected.should.equal("false")
    cluster.visibletoallusers.should.equal("true")


@mock_emr_deprecated
def test_describe_jobflows():
    conn = boto.connect_emr()
    args = run_jobflow_args.copy()
    expected = {}

    for idx in range(4):
        cluster_name = "cluster" + str(idx)
        args["name"] = cluster_name
        cluster_id = conn.run_jobflow(**args)
        expected[cluster_id] = {
            "id": cluster_id,
            "name": cluster_name,
            "state": "WAITING",
        }

    # need sleep since it appears the timestamp is always rounded to
    # the nearest second internally
    time.sleep(1)
    timestamp = datetime.now(pytz.utc)
    time.sleep(1)

    for idx in range(4, 6):
        cluster_name = "cluster" + str(idx)
        args["name"] = cluster_name
        cluster_id = conn.run_jobflow(**args)
        conn.terminate_jobflow(cluster_id)
        expected[cluster_id] = {
            "id": cluster_id,
            "name": cluster_name,
            "state": "TERMINATED",
        }

    jobs = conn.describe_jobflows()
    jobs.should.have.length_of(6)

    for cluster_id, y in expected.items():
        resp = conn.describe_jobflows(jobflow_ids=[cluster_id])
        resp.should.have.length_of(1)
        resp[0].jobflowid.should.equal(cluster_id)

    resp = conn.describe_jobflows(states=["WAITING"])
    resp.should.have.length_of(4)
    for x in resp:
        x.state.should.equal("WAITING")

    resp = conn.describe_jobflows(created_before=timestamp)
    resp.should.have.length_of(4)

    resp = conn.describe_jobflows(created_after=timestamp)
    resp.should.have.length_of(2)


@mock_emr_deprecated
def test_describe_jobflow():
    conn = boto.connect_emr()
    args = run_jobflow_args.copy()
    args.update(
        dict(
            ami_version="3.8.1",
            api_params={
                # 'Applications.member.1.Name': 'Spark',
                # 'Applications.member.1.Version': '2.4.2',
                # 'Configurations.member.1.Classification': 'yarn-site',
                # 'Configurations.member.1.Properties.entry.1.key': 'someproperty',
                # 'Configurations.member.1.Properties.entry.1.value': 'somevalue',
                # 'Instances.EmrManagedMasterSecurityGroup': 'master-security-group',
                "Instances.Ec2SubnetId": "subnet-8be41cec"
            },
            ec2_keyname="mykey",
            hadoop_version="2.4.0",
            name="My jobflow",
            log_uri="s3://some_bucket/jobflow_logs",
            keep_alive=True,
            master_instance_type="c1.medium",
            slave_instance_type="c1.medium",
            num_instances=2,
            availability_zone="us-west-2b",
            job_flow_role="EMR_EC2_DefaultRole",
            service_role="EMR_DefaultRole",
            visible_to_all_users=True,
        )
    )
    cluster_id = conn.run_jobflow(**args)

    jf = conn.describe_jobflow(cluster_id)
    jf.amiversion.should.equal(args["ami_version"])
    jf.bootstrapactions.should.equal(None)
    jf.creationdatetime.should.be.a(str)
    jf.should.have.property("laststatechangereason")
    jf.readydatetime.should.be.a(str)
    jf.startdatetime.should.be.a(str)
    jf.state.should.equal("WAITING")

    jf.ec2keyname.should.equal(args["ec2_keyname"])
    # Ec2SubnetId
    jf.hadoopversion.should.equal(args["hadoop_version"])
    int(jf.instancecount).should.equal(2)

    for ig in jf.instancegroups:
        ig.creationdatetime.should.be.a(str)
        # ig.enddatetime.should.be.a(str)
        ig.should.have.property("instancegroupid").being.a(str)
        int(ig.instancerequestcount).should.equal(1)
        ig.instancerole.should.be.within(["MASTER", "CORE"])
        int(ig.instancerunningcount).should.equal(1)
        ig.instancetype.should.equal("c1.medium")
        ig.laststatechangereason.should.be.a(str)
        ig.market.should.equal("ON_DEMAND")
        ig.name.should.be.a(str)
        ig.readydatetime.should.be.a(str)
        ig.startdatetime.should.be.a(str)
        ig.state.should.equal("RUNNING")

    jf.keepjobflowalivewhennosteps.should.equal("true")
    jf.masterinstanceid.should.be.a(str)
    jf.masterinstancetype.should.equal(args["master_instance_type"])
    jf.masterpublicdnsname.should.be.a(str)
    int(jf.normalizedinstancehours).should.equal(0)
    jf.availabilityzone.should.equal(args["availability_zone"])
    jf.slaveinstancetype.should.equal(args["slave_instance_type"])
    jf.terminationprotected.should.equal("false")

    jf.jobflowid.should.equal(cluster_id)
    # jf.jobflowrole.should.equal(args['job_flow_role'])
    jf.loguri.should.equal(args["log_uri"])
    jf.name.should.equal(args["name"])
    # jf.servicerole.should.equal(args['service_role'])

    jf.steps.should.have.length_of(0)

    list(i.value for i in jf.supported_products).should.equal([])
    jf.visibletoallusers.should.equal("true")


@mock_emr_deprecated
def test_list_clusters():
    conn = boto.connect_emr()
    args = run_jobflow_args.copy()
    expected = {}

    for idx in range(40):
        cluster_name = "jobflow" + str(idx)
        args["name"] = cluster_name
        cluster_id = conn.run_jobflow(**args)
        expected[cluster_id] = {
            "id": cluster_id,
            "name": cluster_name,
            "normalizedinstancehours": "0",
            "state": "WAITING",
        }

    # need sleep since it appears the timestamp is always rounded to
    # the nearest second internally
    time.sleep(1)
    timestamp = datetime.now(pytz.utc)
    time.sleep(1)

    for idx in range(40, 70):
        cluster_name = "jobflow" + str(idx)
        args["name"] = cluster_name
        cluster_id = conn.run_jobflow(**args)
        conn.terminate_jobflow(cluster_id)
        expected[cluster_id] = {
            "id": cluster_id,
            "name": cluster_name,
            "normalizedinstancehours": "0",
            "state": "TERMINATED",
        }

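    # Page through list_clusters using the response marker; each page should
    # contain at most 50 clusters.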
    args = {}
    while True:
        resp = conn.list_clusters(**args)
        clusters = resp.clusters
        len(clusters).should.be.lower_than_or_equal_to(50)
        for x in clusters:
            y = expected[x.id]
            x.id.should.equal(y["id"])
            x.name.should.equal(y["name"])
            x.normalizedinstancehours.should.equal(y["normalizedinstancehours"])
            x.status.state.should.equal(y["state"])
            x.status.timeline.creationdatetime.should.be.a(str)
            if y["state"] == "TERMINATED":
                x.status.timeline.enddatetime.should.be.a(str)
            else:
                x.status.timeline.shouldnt.have.property("enddatetime")
            x.status.timeline.readydatetime.should.be.a(str)
        if not hasattr(resp, "marker"):
            break
        args = {"marker": resp.marker}

    resp = conn.list_clusters(cluster_states=["TERMINATED"])
    resp.clusters.should.have.length_of(30)
    for x in resp.clusters:
        x.status.state.should.equal("TERMINATED")

    resp = conn.list_clusters(created_before=timestamp)
    resp.clusters.should.have.length_of(40)

    resp = conn.list_clusters(created_after=timestamp)
    resp.clusters.should.have.length_of(30)


@mock_emr_deprecated
def test_run_jobflow():
    conn = boto.connect_emr()
    args = run_jobflow_args.copy()
    job_id = conn.run_jobflow(**args)
    job_flow = conn.describe_jobflow(job_id)
    job_flow.state.should.equal("WAITING")
    job_flow.jobflowid.should.equal(job_id)
    job_flow.name.should.equal(args["name"])
    job_flow.masterinstancetype.should.equal(args["master_instance_type"])
    job_flow.slaveinstancetype.should.equal(args["slave_instance_type"])
    job_flow.loguri.should.equal(args["log_uri"])
    job_flow.visibletoallusers.should.equal("false")
    int(job_flow.normalizedinstancehours).should.equal(0)
    job_flow.steps.should.have.length_of(0)


@mock_emr_deprecated
def test_run_jobflow_in_multiple_regions():
    regions = {}
    for region in ["us-east-1", "eu-west-1"]:
        conn = boto.emr.connect_to_region(region)
        args = run_jobflow_args.copy()
        args["name"] = region
        cluster_id = conn.run_jobflow(**args)
        regions[region] = {"conn": conn, "cluster_id": cluster_id}

    for region in regions.keys():
        conn = regions[region]["conn"]
        jf = conn.describe_jobflow(regions[region]["cluster_id"])
        jf.name.should.equal(region)


@requires_boto_gte("2.8")
@mock_emr_deprecated
def test_run_jobflow_with_new_params():
    # Test that run_jobflow works with newer params
    conn = boto.connect_emr()
    conn.run_jobflow(**run_jobflow_args)


@requires_boto_gte("2.8")
@mock_emr_deprecated
def test_run_jobflow_with_visible_to_all_users():
    conn = boto.connect_emr()
    for expected in (True, False):
        job_id = conn.run_jobflow(visible_to_all_users=expected, **run_jobflow_args)
        job_flow = conn.describe_jobflow(job_id)
        job_flow.visibletoallusers.should.equal(str(expected).lower())


@requires_boto_gte("2.8")
@mock_emr_deprecated
def test_run_jobflow_with_instance_groups():
    input_groups = dict((g.name, g) for g in input_instance_groups)
    conn = boto.connect_emr()
    job_id = conn.run_jobflow(
        instance_groups=input_instance_groups, **run_jobflow_args
    )
    job_flow = conn.describe_jobflow(job_id)
    int(job_flow.instancecount).should.equal(
        sum(g.num_instances for g in input_instance_groups)
    )

    for instance_group in job_flow.instancegroups:
        expected = input_groups[instance_group.name]
        instance_group.should.have.property("instancegroupid")
        int(instance_group.instancerunningcount).should.equal(expected.num_instances)
        instance_group.instancerole.should.equal(expected.role)
        instance_group.instancetype.should.equal(expected.type)
        instance_group.market.should.equal(expected.market)
        if hasattr(expected, "bidprice"):
            instance_group.bidprice.should.equal(expected.bidprice)


@requires_boto_gte("2.8")
@mock_emr_deprecated
def test_set_termination_protection():
    conn = boto.connect_emr()
    job_id = conn.run_jobflow(**run_jobflow_args)
    job_flow = conn.describe_jobflow(job_id)
    job_flow.terminationprotected.should.equal("false")

    conn.set_termination_protection(job_id, True)
    job_flow = conn.describe_jobflow(job_id)
    job_flow.terminationprotected.should.equal("true")

    conn.set_termination_protection(job_id, False)
    job_flow = conn.describe_jobflow(job_id)
    job_flow.terminationprotected.should.equal("false")


@requires_boto_gte("2.8")
@mock_emr_deprecated
def test_set_visible_to_all_users():
    conn = boto.connect_emr()
    args = run_jobflow_args.copy()
    args["visible_to_all_users"] = False
    job_id = conn.run_jobflow(**args)
    job_flow = conn.describe_jobflow(job_id)
    job_flow.visibletoallusers.should.equal("false")

    conn.set_visible_to_all_users(job_id, True)
    job_flow = conn.describe_jobflow(job_id)
    job_flow.visibletoallusers.should.equal("true")

    conn.set_visible_to_all_users(job_id, False)
    job_flow = conn.describe_jobflow(job_id)
    job_flow.visibletoallusers.should.equal("false")


@mock_emr_deprecated
def test_terminate_jobflow():
    conn = boto.connect_emr()
    job_id = conn.run_jobflow(**run_jobflow_args)
    flow = conn.describe_jobflows()[0]
    flow.state.should.equal("WAITING")

    conn.terminate_jobflow(job_id)
    flow = conn.describe_jobflows()[0]
    flow.state.should.equal("TERMINATED")


# testing multiple endpoints for each feature


@mock_emr_deprecated
def test_bootstrap_actions():
    bootstrap_actions = [
        BootstrapAction(
            name="bs1",
            path="path/to/script",
            bootstrap_action_args=["arg1", "arg2&arg3"],
        ),
        BootstrapAction(
            name="bs2", path="path/to/anotherscript", bootstrap_action_args=[]
        ),
    ]

    conn = boto.connect_emr()
    cluster_id = conn.run_jobflow(
        bootstrap_actions=bootstrap_actions, **run_jobflow_args
    )

    jf = conn.describe_jobflow(cluster_id)
    for x, y in zip(jf.bootstrapactions, bootstrap_actions):
        x.name.should.equal(y.name)
        x.path.should.equal(y.path)
        list(o.value for o in x.args).should.equal(y.args())

    resp = conn.list_bootstrap_actions(cluster_id)
    for i, y in enumerate(bootstrap_actions):
        x = resp.actions[i]
        x.name.should.equal(y.name)
        x.scriptpath.should.equal(y.path)
        list(arg.value for arg in x.args).should.equal(y.args())


@mock_emr_deprecated
def test_instance_groups():
    input_groups = dict((g.name, g) for g in input_instance_groups)

    conn = boto.connect_emr()
    args = run_jobflow_args.copy()
    for key in ["master_instance_type", "slave_instance_type", "num_instances"]:
        del args[key]
    args["instance_groups"] = input_instance_groups[:2]
    job_id = conn.run_jobflow(**args)

    jf = conn.describe_jobflow(job_id)
    base_instance_count = int(jf.instancecount)

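    # Add the two SPOT task groups to the already-running cluster.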
    conn.add_instance_groups(job_id, input_instance_groups[2:])

    jf = conn.describe_jobflow(job_id)
    int(jf.instancecount).should.equal(
        sum(g.num_instances for g in input_instance_groups)
    )
    for x in jf.instancegroups:
        y = input_groups[x.name]
        if hasattr(y, "bidprice"):
            x.bidprice.should.equal(y.bidprice)
        x.creationdatetime.should.be.a(str)
        # x.enddatetime.should.be.a(str)
        x.should.have.property("instancegroupid")
        int(x.instancerequestcount).should.equal(y.num_instances)
        x.instancerole.should.equal(y.role)
        int(x.instancerunningcount).should.equal(y.num_instances)
        x.instancetype.should.equal(y.type)
        x.laststatechangereason.should.be.a(str)
        x.market.should.equal(y.market)
        x.name.should.be.a(str)
        x.readydatetime.should.be.a(str)
        x.startdatetime.should.be.a(str)
        x.state.should.equal("RUNNING")

    for x in conn.list_instance_groups(job_id).instancegroups:
        y = input_groups[x.name]
        if hasattr(y, "bidprice"):
            x.bidprice.should.equal(y.bidprice)
        # Configurations
        # EbsBlockDevices
        # EbsOptimized
        x.should.have.property("id")
        x.instancegrouptype.should.equal(y.role)
        x.instancetype.should.equal(y.type)
        x.market.should.equal(y.market)
        x.name.should.equal(y.name)
        int(x.requestedinstancecount).should.equal(y.num_instances)
        int(x.runninginstancecount).should.equal(y.num_instances)
        # ShrinkPolicy
        x.status.state.should.equal("RUNNING")
        x.status.statechangereason.code.should.be.a(str)
        x.status.statechangereason.message.should.be.a(str)
        x.status.timeline.creationdatetime.should.be.a(str)
        # x.status.timeline.enddatetime.should.be.a(str)
        x.status.timeline.readydatetime.should.be.a(str)

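    # Resize the two task groups to 2 and 3 instances and verify the new counts.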
    igs = dict((g.name, g) for g in jf.instancegroups)
    conn.modify_instance_groups(
        [igs["task-1"].instancegroupid, igs["task-2"].instancegroupid], [2, 3]
    )

    jf = conn.describe_jobflow(job_id)
    int(jf.instancecount).should.equal(base_instance_count + 5)
    igs = dict((g.name, g) for g in jf.instancegroups)
    int(igs["task-1"].instancerunningcount).should.equal(2)
    int(igs["task-2"].instancerunningcount).should.equal(3)


@mock_emr_deprecated
def test_steps():
    input_steps = [
        StreamingStep(
            name="My wordcount example",
            mapper="s3n://elasticmapreduce/samples/wordcount/wordSplitter.py",
            reducer="aggregate",
            input="s3n://elasticmapreduce/samples/wordcount/input",
            output="s3n://output_bucket/output/wordcount_output",
        ),
        StreamingStep(
            name="My wordcount example & co.",
            mapper="s3n://elasticmapreduce/samples/wordcount/wordSplitter2.py",
            reducer="aggregate",
            input="s3n://elasticmapreduce/samples/wordcount/input2",
            output="s3n://output_bucket/output/wordcount_output2",
        ),
    ]
    # TODO: implementation and test for cancel_steps

    conn = boto.connect_emr()
    cluster_id = conn.run_jobflow(steps=[input_steps[0]], **run_jobflow_args)

    jf = conn.describe_jobflow(cluster_id)
    jf.steps.should.have.length_of(1)

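    # Add the second streaming step to the running job flow.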
    conn.add_jobflow_steps(cluster_id, [input_steps[1]])

    jf = conn.describe_jobflow(cluster_id)
    jf.steps.should.have.length_of(2)
    for step in jf.steps:
        step.actiononfailure.should.equal("TERMINATE_JOB_FLOW")
        list(arg.value for arg in step.args).should.have.length_of(8)
        step.creationdatetime.should.be.a(str)
        # step.enddatetime.should.be.a(str)
        step.jar.should.equal("/home/hadoop/contrib/streaming/hadoop-streaming.jar")
        step.laststatechangereason.should.be.a(str)
        step.mainclass.should.equal("")
        step.name.should.be.a(str)
        # step.readydatetime.should.be.a(str)
        # step.startdatetime.should.be.a(str)
        step.state.should.be.within(["STARTING", "PENDING"])

    expected = dict((s.name, s) for s in input_steps)

    steps = conn.list_steps(cluster_id).steps
    for x in steps:
        y = expected[x.name]
        # actiononfailure
        list(arg.value for arg in x.config.args).should.equal(
            [
                "-mapper",
                y.mapper,
                "-reducer",
                y.reducer,
                "-input",
                y.input,
                "-output",
                y.output,
            ]
        )
        x.config.jar.should.equal(
            "/home/hadoop/contrib/streaming/hadoop-streaming.jar"
        )
        x.config.mainclass.should.equal("")
        # properties
        x.should.have.property("id").should.be.a(str)
        x.name.should.equal(y.name)
        x.status.state.should.be.within(["STARTING", "PENDING"])
        # x.status.statechangereason
        x.status.timeline.creationdatetime.should.be.a(str)
        # x.status.timeline.enddatetime.should.be.a(str)
        # x.status.timeline.startdatetime.should.be.a(str)

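        # describe_step should report the same details as the list_steps entry.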
        x = conn.describe_step(cluster_id, x.id)
        list(arg.value for arg in x.config.args).should.equal(
            [
                "-mapper",
                y.mapper,
                "-reducer",
                y.reducer,
                "-input",
                y.input,
                "-output",
                y.output,
            ]
        )
        x.config.jar.should.equal(
            "/home/hadoop/contrib/streaming/hadoop-streaming.jar"
        )
        x.config.mainclass.should.equal("")
        # properties
        x.should.have.property("id").should.be.a(str)
        x.name.should.equal(y.name)
        x.status.state.should.be.within(["STARTING", "PENDING"])
        # x.status.statechangereason
        x.status.timeline.creationdatetime.should.be.a(str)
        # x.status.timeline.enddatetime.should.be.a(str)
        # x.status.timeline.startdatetime.should.be.a(str)

    @requires_boto_gte("2.39")
    def test_list_steps_with_states():
        # boto's list_steps prior to 2.39 has a bug that ignores
        # the step_states argument.
        steps = conn.list_steps(cluster_id).steps
        step_id = steps[0].id
        steps = conn.list_steps(cluster_id, step_states=["STARTING"]).steps
        steps.should.have.length_of(1)
        steps[0].id.should.equal(step_id)

    test_list_steps_with_states()


@mock_emr_deprecated
def test_tags():
    input_tags = {"tag1": "val1", "tag2": "val2"}

    conn = boto.connect_emr()
    cluster_id = conn.run_jobflow(**run_jobflow_args)

    conn.add_tags(cluster_id, input_tags)
    cluster = conn.describe_cluster(cluster_id)
    cluster.tags.should.have.length_of(2)
    dict((t.key, t.value) for t in cluster.tags).should.equal(input_tags)

    conn.remove_tags(cluster_id, list(input_tags.keys()))
    cluster = conn.describe_cluster(cluster_id)
    cluster.tags.should.have.length_of(0)