[prev in list] [next in list] [prev in thread] [next in thread]
List: lxc-devel
Subject: [lxc-devel] [lxd/master] tests: Reduce ceph pg_num down to 1
From: stgraber on Github <lxc-bot () linuxcontainers ! org>
Date: 2018-05-30 1:42:58
Message-ID: 20180530014258.50EAD5694A () mailman01 ! srv ! dcmtl ! stgraber ! net
[Download RAW message or body]
[Attachment #2 (text/x-mailbox)]
The following pull request was submitted through Github.
It can be accessed and reviewed at: https://github.com/lxc/lxd/pull/4609
This e-mail was sent by the LXC bot, direct replies will not reach the author
unless they happen to be subscribed to this list.
=== Description (from pull-request) ===
Signed-off-by: Stéphane Graber <stgraber@ubuntu.com>
[Attachment #3 (text/plain)]
From 9aa110d64ad7862609996037c3ead4b717857b6e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Stéphane Graber?= <stgraber@ubuntu.com>
Date: Tue, 29 May 2018 21:41:28 -0400
Subject: [PATCH] tests: Reduce ceph pg_num down to 1
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Signed-off-by: Stéphane Graber <stgraber@ubuntu.com>
---
test/backends/ceph.sh | 2 +-
test/includes/clustering.sh | 2 +-
test/suites/clustering.sh | 2 +-
test/suites/storage_driver_ceph.sh | 6 +++---
test/suites/storage_local_volume_handling.sh | 4 ++--
5 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/test/backends/ceph.sh b/test/backends/ceph.sh
index 7cdaa2134..77b4ee363 100644
--- a/test/backends/ceph.sh
+++ b/test/backends/ceph.sh
@@ -15,7 +15,7 @@ ceph_configure() {
echo "==> Configuring CEPH backend in ${LXD_DIR}"
-    lxc storage create "lxdtest-$(basename "${LXD_DIR}")" ceph volume.size=25MB ceph.osd.pg_num=8
+    lxc storage create "lxdtest-$(basename "${LXD_DIR}")" ceph volume.size=25MB ceph.osd.pg_num=1
     lxc profile device add default root disk path="/" pool="lxdtest-$(basename "${LXD_DIR}")"
 }
diff --git a/test/includes/clustering.sh b/test/includes/clustering.sh
index dc800b6e1..47a9d26e7 100644
--- a/test/includes/clustering.sh
+++ b/test/includes/clustering.sh
@@ -182,7 +182,7 @@ EOF
config:
source: lxdtest-$(basename "${TEST_DIR}")
volume.size: 25GB
- ceph.osd.pg_num: 8
+ ceph.osd.pg_num: 1
EOF
fi
cat >> "${LXD_DIR}/preseed.yaml" <<EOF
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 22fcfc0c6..3a4f52a38 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -404,7 +404,7 @@ test_clustering_storage() {
if [ "${driver}" = "lvm" ]; then
LXD_DIR="${LXD_TWO_DIR}" lxc storage create pool1 "${driver}" volume.size=25MB
elif [ "${driver}" = "ceph" ]; then
-    LXD_DIR="${LXD_TWO_DIR}" lxc storage create pool1 "${driver}" volume.size=25MB ceph.osd.pg_num=8
+    LXD_DIR="${LXD_TWO_DIR}" lxc storage create pool1 "${driver}" volume.size=25MB ceph.osd.pg_num=1
   else
LXD_DIR="${LXD_TWO_DIR}" lxc storage create pool1 "${driver}"
fi
diff --git a/test/suites/storage_driver_ceph.sh b/test/suites/storage_driver_ceph.sh
index b4211e693..174269642 100644
--- a/test/suites/storage_driver_ceph.sh
+++ b/test/suites/storage_driver_ceph.sh
@@ -19,7 +19,7 @@ test_storage_driver_ceph() {
fi
# shellcheck disable=SC1009
-  lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool1" ceph volume.size=25MB ceph.osd.pg_num=8
+  lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool1" ceph volume.size=25MB ceph.osd.pg_num=1
# Set default storage pool for image import.
lxc profile device add default root disk path="/" pool="lxdtest-$(basename "${LXD_DIR}")-pool1"
@@ -31,7 +31,7 @@ test_storage_driver_ceph() {
ceph --cluster "${LXD_CEPH_CLUSTER}" osd pool create "lxdtest-$(basename \
"${LXD_DIR}")-existing-osd-pool" 32
# Let LXD use an already existing osd pool.
-  lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool2" ceph source="lxdtest-$(basename "${LXD_DIR}")-existing-osd-pool" volume.size=25MB ceph.osd.pg_num=8
+  lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool2" ceph source="lxdtest-$(basename "${LXD_DIR}")-existing-osd-pool" volume.size=25MB ceph.osd.pg_num=1
# Test that no invalid ceph storage pool configuration keys can be set.
! lxc storage create "lxdtest-$(basename "${LXD_DIR}")-invalid-ceph-pool-config" ceph lvm.thinpool_name=bla
@@ -39,7 +39,7 @@ test_storage_driver_ceph() {
! lxc storage create "lxdtest-$(basename "${LXD_DIR}")-invalid-ceph-pool-config" \
ceph lvm.vg_name=bla
# Test that all valid ceph storage pool configuration keys can be set.
-  lxc storage create "lxdtest-$(basename "${LXD_DIR}")-valid-ceph-pool-config" ceph volume.block.filesystem=ext4 volume.block.mount_options=discard volume.size=2GB ceph.rbd.clone_copy=true ceph.osd.pg_num=8
+  lxc storage create "lxdtest-$(basename "${LXD_DIR}")-valid-ceph-pool-config" ceph volume.block.filesystem=ext4 volume.block.mount_options=discard volume.size=2GB ceph.rbd.clone_copy=true ceph.osd.pg_num=1
lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-valid-ceph-pool-config"
# Muck around with some containers on various pools.
diff --git a/test/suites/storage_local_volume_handling.sh b/test/suites/storage_local_volume_handling.sh
index 0c7dfdcc8..2a395d5bf 100644
--- a/test/suites/storage_local_volume_handling.sh
+++ b/test/suites/storage_local_volume_handling.sh
@@ -21,7 +21,7 @@ test_storage_local_volume_handling() {
fi
if storage_backend_available "ceph"; then
-    lxc storage create "lxdtest-$(basename "${LXD_DIR}")-ceph" ceph volume.size=25MB ceph.osd.pg_num=8
+    lxc storage create "lxdtest-$(basename "${LXD_DIR}")-ceph" ceph volume.size=25MB ceph.osd.pg_num=1
  fi
lxc storage create "lxdtest-$(basename "${LXD_DIR}")-dir" dir
@@ -58,7 +58,7 @@ test_storage_local_volume_handling() {
fi
if [ "$driver" = "ceph" ]; then
- pool_opts="volume.size=25MB ceph.osd.pg_num=8"
+ pool_opts="volume.size=25MB ceph.osd.pg_num=1"
fi
if [ "$driver" = "lvm" ]; then
[Attachment #4 (text/plain)]
_______________________________________________
lxc-devel mailing list
lxc-devel@lists.linuxcontainers.org
http://lists.linuxcontainers.org/listinfo/lxc-devel
[prev in list] [next in list] [prev in thread] [next in thread]
Configure |
About |
News |
Add a list |
Sponsored by KoreLogic