[prev in list] [next in list] [prev in thread] [next in thread]
List: mesos-commits
Subject: mesos git commit: Added full reserved resource info to `/slaves` master endpoint.
From: mpark () apache ! org
Date: 2016-02-29 7:25:51
Message-ID: f2e6e654358a4426a469b798f243ddfc () git ! apache ! org
[Download RAW message or body]
Repository: mesos
Updated Branches:
refs/heads/master d5fee86af -> e2a3cd63b
Added full reserved resource info to `/slaves` master endpoint.
This allows operators to list all the dynamic reservations and
persistent volumes in a cluster. This is important in itself;
it also makes it easier to use the `/unreserve` and
`/destroy-volumes` endpoints.
Review: https://reviews.apache.org/r/44047/
Project: http://git-wip-us.apache.org/repos/asf/mesos/repo
Commit: http://git-wip-us.apache.org/repos/asf/mesos/commit/e2a3cd63
Tree: http://git-wip-us.apache.org/repos/asf/mesos/tree/e2a3cd63
Diff: http://git-wip-us.apache.org/repos/asf/mesos/diff/e2a3cd63
Branch: refs/heads/master
Commit: e2a3cd63b558c2399c7341e1c232989ef73196d3
Parents: d5fee86
Author: Neil Conway <neil.conway@gmail.com>
Authored: Mon Feb 29 02:21:39 2016 -0500
Committer: Michael Park <mpark@apache.org>
Committed: Mon Feb 29 02:21:39 2016 -0500
----------------------------------------------------------------------
docs/persistent-volume.md | 6 +
docs/reservation.md | 6 +
src/master/http.cpp | 50 ++-
src/tests/persistent_volume_endpoints_tests.cpp | 313 +++++++++++++++++++
4 files changed, 373 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/mesos/blob/e2a3cd63/docs/persistent-volume.md
----------------------------------------------------------------------
diff --git a/docs/persistent-volume.md b/docs/persistent-volume.md
index 47ada98..e0fe559 100644
--- a/docs/persistent-volume.md
+++ b/docs/persistent-volume.md
@@ -367,6 +367,12 @@ volumes will be destroyed. To determine if a destroy operation has succeeded, the user can examine the state of the appropriate Mesos slave (e.g., via the slave's [/state](endpoints/slave/state.md) HTTP endpoint).
+### Listing Persistent Volumes
+
+Information about the persistent volumes at each slave in the cluster can be
+found by querying the [/slaves](endpoints/master/slaves.md) master endpoint
+(under the `reserved_resources_full` key).
+
### Programming with Persistent Volumes
Some suggestions to keep in mind when building applications that use persistent
http://git-wip-us.apache.org/repos/asf/mesos/blob/e2a3cd63/docs/reservation.md
----------------------------------------------------------------------
diff --git a/docs/reservation.md b/docs/reservation.md
index 450f4ee..55924ad 100644
--- a/docs/reservation.md
+++ b/docs/reservation.md
@@ -357,3 +357,9 @@ the resources are located. That asynchronous message may not be delivered, in which case no resources will be unreserved. To determine if an unreserve operation has succeeded, the user can examine the state of the appropriate Mesos slave (e.g., via the slave's [/state](endpoints/slave/state.md) HTTP endpoint).
+
+### Listing Reservations
+
+Information about the reserved resources at each slave in the cluster can be
+found by querying the [/slaves](endpoints/master/slaves.md) master endpoint
+(under the `reserved_resources_full` key).
http://git-wip-us.apache.org/repos/asf/mesos/blob/e2a3cd63/src/master/http.cpp
----------------------------------------------------------------------
diff --git a/src/master/http.cpp b/src/master/http.cpp
index d6e1f22..5e9e28e 100644
--- a/src/master/http.cpp
+++ b/src/master/http.cpp
@@ -1036,8 +1036,54 @@ Future<Response> Master::Http::slaves(const Request& request)
const auto slaves = [this](JSON::ObjectWriter* writer) {
writer->field("slaves", [this](JSON::ArrayWriter* writer) {
foreachvalue (const Slave* slave, master->slaves.registered) {
- writer->element(Full<Slave>(*slave));
- }
+ writer->element([&slave](JSON::ObjectWriter* writer) {
+ json(writer, Full<Slave>(*slave));
+
+ // Add the complete protobuf->JSON for all used, reserved,
+ // and offered resources. The other endpoints summarize
+ // resource information, which omits the details of
+ // reservations and persistent volumes. Full resource
+ // information is necessary so that operators can use the
+ // `/unreserve` and `/destroy-volumes` endpoints.
+
+ hashmap<string, Resources> reserved =
+ slave->totalResources.reserved();
+
+ writer->field(
+ "reserved_resources_full",
+ [&reserved](JSON::ObjectWriter* writer) {
+ foreachpair (const string& role,
+ const Resources& resources,
+ reserved) {
+ writer->field(role, [&resources](JSON::ArrayWriter* writer) {
+ foreach (const Resource& resource, resources) {
+ writer->element(JSON::Protobuf(resource));
+ }
+ });
+ }
+ });
+
+ Resources usedResources = Resources::sum(slave->usedResources);
+
+ writer->field(
+ "used_resources_full",
+ [&usedResources](JSON::ArrayWriter* writer) {
+ foreach (const Resource& resource, usedResources) {
+ writer->element(JSON::Protobuf(resource));
+ }
+ });
+
+ const Resources& offeredResources = slave->offeredResources;
+
+ writer->field(
+ "offered_resources_full",
+ [&offeredResources](JSON::ArrayWriter* writer) {
+ foreach (const Resource& resource, offeredResources) {
+ writer->element(JSON::Protobuf(resource));
+ }
+ });
+ });
+        }
});
};
http://git-wip-us.apache.org/repos/asf/mesos/blob/e2a3cd63/src/tests/persistent_volume_endpoints_tests.cpp
----------------------------------------------------------------------
diff --git a/src/tests/persistent_volume_endpoints_tests.cpp b/src/tests/persistent_volume_endpoints_tests.cpp
index 08b9102..81185a1 100644
--- a/src/tests/persistent_volume_endpoints_tests.cpp
+++ b/src/tests/persistent_volume_endpoints_tests.cpp
@@ -1390,6 +1390,319 @@ TEST_F(PersistentVolumeEndpointsTest, NoVolumes)
Shutdown();
}
+
+// This tests that dynamic reservations and persistent volumes are
+// reflected in the "/slaves" master endpoint.
+TEST_F(PersistentVolumeEndpointsTest, SlavesEndpointFullResources)
+{
+ TestAllocator<> allocator;
+
+ EXPECT_CALL(allocator, initialize(_, _, _, _));
+
+ Try<PID<Master>> master = StartMaster(&allocator);
+ ASSERT_SOME(master);
+
+ Future<SlaveID> slaveId;
+ EXPECT_CALL(allocator, addSlave(_, _, _, _, _))
+ .WillOnce(DoAll(InvokeAddSlave(&allocator),
+ FutureArg<0>(&slaveId)));
+
+ slave::Flags slaveFlags = CreateSlaveFlags();
+ slaveFlags.resources = "cpus:4;mem:2048;disk:4096";
+
+ Try<PID<Slave>> slave = StartSlave(slaveFlags);
+ ASSERT_SOME(slave);
+
+ FrameworkInfo frameworkInfo = createFrameworkInfo();
+
+ Resources unreserved = Resources::parse("cpus:1;mem:512;disk:1024").get();
+ Resources dynamicallyReserved = unreserved.flatten(
+ frameworkInfo.role(),
+ createReservationInfo(DEFAULT_CREDENTIAL.principal()));
+
+ Future<Response> response = process::http::post(
+ master.get(),
+ "reserve",
+ createBasicAuthHeaders(DEFAULT_CREDENTIAL),
+ createRequestBody(slaveId.get(), "resources", dynamicallyReserved));
+
+ AWAIT_EXPECT_RESPONSE_STATUS_EQ(OK().status, response);
+
+ Resources volume = createPersistentVolume(
+ Megabytes(64),
+ frameworkInfo.role(),
+ "id1",
+ "path1",
+ DEFAULT_CREDENTIAL.principal());
+
+ response = process::http::post(
+ master.get(),
+ "create-volumes",
+ createBasicAuthHeaders(DEFAULT_CREDENTIAL),
+ createRequestBody(slaveId.get(), "volumes", volume));
+
+ AWAIT_EXPECT_RESPONSE_STATUS_EQ(OK().status, response);
+
+ // Start a framework and launch a task on some (but not all) of the
+ // reserved resources at the slave.
+ MockScheduler sched;
+ MesosSchedulerDriver driver(
+ &sched, frameworkInfo, master.get(), DEFAULT_CREDENTIAL);
+
+ Future<vector<Offer>> offers;
+
+ EXPECT_CALL(sched, registered(&driver, _, _));
+
+ EXPECT_CALL(sched, resourceOffers(&driver, _))
+ .WillOnce(FutureArg<1>(&offers));
+
+ driver.start();
+
+ AWAIT_READY(offers);
+
+ ASSERT_EQ(1u, offers.get().size());
+ Offer offer = offers.get()[0];
+
+ EXPECT_TRUE(Resources(offer.resources()).contains(volume));
+
+ Resources taskUnreserved = Resources::parse("cpus:1;mem:256").get();
+ Resources taskResources = taskUnreserved.flatten(
+ frameworkInfo.role(),
+ createReservationInfo(DEFAULT_CREDENTIAL.principal()));
+
+ TaskInfo taskInfo = createTask(offer.slave_id(), taskResources, "sleep 1000");
+
+ // We use the filter explicitly here so that the resources will not
+ // be filtered for 5 seconds (the default).
+ Filters filters;
+ filters.set_refuse_seconds(0);
+
+ // Expect a TASK_RUNNING status.
+ EXPECT_CALL(sched, statusUpdate(&driver, _));
+
+ Future<Nothing> _statusUpdateAcknowledgement =
+ FUTURE_DISPATCH(_, &Slave::_statusUpdateAcknowledgement);
+
+ // Expect another resource offer.
+ EXPECT_CALL(sched, resourceOffers(&driver, _))
+ .WillOnce(FutureArg<1>(&offers));
+
+ driver.acceptOffers({offer.id()}, {LAUNCH({taskInfo})}, filters);
+
+ // Wait for TASK_RUNNING update ack.
+ AWAIT_READY(_statusUpdateAcknowledgement);
+
+ response = process::http::get(master.get(), "slaves");
+
+ AWAIT_EXPECT_RESPONSE_STATUS_EQ(OK().status, response);
+ AWAIT_EXPECT_RESPONSE_HEADER_EQ(APPLICATION_JSON, "Content-Type", response);
+
+ Try<JSON::Object> parse = JSON::parse<JSON::Object>(response.get().body);
+
+ ASSERT_SOME(parse);
+
+ JSON::Object slavesObject = parse.get();
+
+ ASSERT_TRUE(slavesObject.values["slaves"].is<JSON::Array>());
+ JSON::Array slaveArray = slavesObject.values["slaves"].as<JSON::Array>();
+
+ EXPECT_EQ(1u, slaveArray.values.size());
+
+ ASSERT_TRUE(slaveArray.values[0].is<JSON::Object>());
+ JSON::Object slaveObject = slaveArray.values[0].as<JSON::Object>();
+
+ Try<JSON::Value> expectedReserved = JSON::parse(
+ R"~(
+ {
+ "role1": [
+ {
+ "name": "cpus",
+ "type": "SCALAR",
+ "scalar": {
+ "value": 1.0
+ },
+ "role": "role1",
+ "reservation": {
+ "principal": "test-principal"
+ }
+ },
+ {
+ "name": "mem",
+ "type": "SCALAR",
+ "scalar": {
+ "value": 512.0
+ },
+ "role": "role1",
+ "reservation": {
+ "principal": "test-principal"
+ }
+ },
+ {
+ "name": "disk",
+ "type": "SCALAR",
+ "scalar": {
+ "value": 960.0
+ },
+ "role": "role1",
+ "reservation": {
+ "principal": "test-principal"
+ }
+ },
+ {
+ "name": "disk",
+ "type": "SCALAR",
+ "scalar": {
+ "value": 64.0
+ },
+ "role": "role1",
+ "reservation": {
+ "principal": "test-principal"
+ },
+ "disk": {
+ "persistence": {
+ "id": "id1"
+ },
+ "volume": {
+ "mode": "RW",
+ "container_path": "path1"
+ }
+ }
+ }
+ ]
+ })~");
+
+ ASSERT_SOME(expectedReserved);
+
+ Try<JSON::Value> expectedUsed = JSON::parse(
+ R"~(
+ [
+ {
+ "name": "cpus",
+ "reservation": {
+ "principal": "test-principal"
+ },
+ "role": "role1",
+ "scalar": {
+ "value": 1.0
+ },
+ "type": "SCALAR"
+ },
+ {
+ "name": "mem",
+ "reservation": {
+ "principal": "test-principal"
+ },
+ "role": "role1",
+ "scalar": {
+ "value": 256.0
+ },
+ "type": "SCALAR"
+ }
+ ])~");
+
+ ASSERT_SOME(expectedUsed);
+
+ Try<JSON::Value> expectedOffered = JSON::parse(
+ R"~(
+ [
+ {
+ "disk": {
+ "persistence": {
+ "id": "id1"
+ },
+ "volume": {
+ "container_path": "path1",
+ "mode": "RW"
+ }
+ },
+ "name": "disk",
+ "reservation": {
+ "principal": "test-principal"
+ },
+ "role": "role1",
+ "scalar": {
+ "value": 64.0
+ },
+ "type": "SCALAR"
+ },
+ {
+ "name": "mem",
+ "reservation": {
+ "principal": "test-principal"
+ },
+ "role": "role1",
+ "scalar": {
+ "value": 256.0
+ },
+ "type": "SCALAR"
+ },
+ {
+ "name": "disk",
+ "reservation": {
+ "principal": "test-principal"
+ },
+ "role": "role1",
+ "scalar": {
+ "value": 960.0
+ },
+ "type": "SCALAR"
+ },
+ {
+ "name": "cpus",
+ "role": "*",
+ "scalar": {
+ "value": 3.0
+ },
+ "type": "SCALAR"
+ },
+ {
+ "name": "mem",
+ "role": "*",
+ "scalar": {
+ "value": 1536.0
+ },
+ "type": "SCALAR"
+ },
+ {
+ "name": "disk",
+ "role": "*",
+ "scalar": {
+ "value": 3072.0
+ },
+ "type": "SCALAR"
+ },
+ {
+ "name": "ports",
+ "ranges": {
+ "range": [
+ {
+ "begin": 31000,
+ "end": 32000
+ }
+ ]
+ },
+ "role": "*",
+ "type": "RANGES"
+ }
+ ])~");
+
+ ASSERT_SOME(expectedOffered);
+
+ JSON::Value reservedValue = slaveObject.values["reserved_resources_full"];
+ EXPECT_EQ(expectedReserved.get(), reservedValue);
+
+ JSON::Value usedValue = slaveObject.values["used_resources_full"];
+ EXPECT_EQ(expectedUsed.get(), usedValue);
+
+ JSON::Value offeredValue = slaveObject.values["offered_resources_full"];
+ EXPECT_EQ(expectedOffered.get(), offeredValue);
+
+ driver.stop();
+ driver.join();
+
+ Shutdown();
+}
+
} // namespace tests {
} // namespace internal {
} // namespace mesos {
[prev in list] [next in list] [prev in thread] [next in thread]
Configure |
About |
News |
Add a list |
Sponsored by KoreLogic