From 36684a4a2e6f031a32c6d090869e4e7a08913667 Mon Sep 17 00:00:00 2001
From: 0x3bb <0x3bb@3bb.io>
Date: Sat, 10 Aug 2024 15:12:55 +0000
Subject: [PATCH] Update 2024-08-10-recovering-ceph-cluster.md

---
 2024-08-10-recovering-ceph-cluster.md | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/2024-08-10-recovering-ceph-cluster.md b/2024-08-10-recovering-ceph-cluster.md
index 8962000..0a86766 100644
--- a/2024-08-10-recovering-ceph-cluster.md
+++ b/2024-08-10-recovering-ceph-cluster.md
@@ -578,7 +578,7 @@ The monitor `osdmap` still contained the bad EC profile.
 
 `ceph-monstore-tool /tmp/mon-bak get osdmap > osdmap.bad`
 
-osdmaptool --dump json osdmap.bad | grep -i profile
+`osdmaptool --dump json osdmap.bad | grep -i profile`
 
 ```
 "erasure_code_profiles":{
@@ -597,6 +597,8 @@ After rebuilding the monstore...
 
 `ceph-monstore-tool /tmp/mon-a get osdmap > osdmap.good`
 
+`osdmaptool --dump json osdmap.good | grep -i profile`
+
 ```
 "erasure_code_profiles":{
 "ceph-objectstore.rgw.buckets.data_ecprofile":{
@@ -611,7 +613,7 @@ After rebuilding the monstore...
 ```
 
 Therefore, it seems as if I could have attempted to rebuild the monstore first,
-possibly circumventing the `ECAssert` errors. The placement groups on `osd-0` were
+possibly circumventing the _EC Assert_ errors. The placement groups on `osd-0` were
 still mapping to 3 OSDs, not 5.
 
 ```