author    Jeff Layton <jlayton@redhat.com>    2017-04-04 14:39:39 +0200
committer Ilya Dryomov <idryomov@gmail.com>   2017-05-04 09:19:21 +0200
commit    fc36d0a42c19870e57a542c2dd0972185584d407 (patch)
tree      edcd29278e2f2260d3efd0ef3f462540af0f2f6c /net
parent    libceph: allow requests to return immediately on full conditions if caller wi... (diff)
libceph: abort already submitted but abortable requests when map or pool goes full
When a Ceph volume hits capacity, a flag is set in the OSD map to indicate that, and a new map is sprayed around the cluster. With cephfs we want it to shut down any abortable requests that are in progress with an -ENOSPC error as they'd just hang otherwise.

Add a new ceph_osdc_abort_on_full helper function to handle this. It will first check whether there is an out-of-space condition in the cluster and then walk the tree and abort any request that has r_abort_on_full set with a -ENOSPC error. Call this new function directly whenever we get a new OSD map.

Signed-off-by: Jeff Layton <jlayton@redhat.com>
Reviewed-by: Ilya Dryomov <idryomov@gmail.com>
Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
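As a rough illustration (not part of this patch), a caller that wants this behaviour marks its request abortable before submission and treats -ENOSPC as the abort status. The snippet below is a hypothetical sketch: r_abort_on_full is the field introduced by the parent commit and ceph_osdc_start_request()/ceph_osdc_wait_request() are existing libceph calls, but the surrounding error handling is invented for illustration only.

        /*
         * Hypothetical caller-side sketch: opt the request in to abort-on-full
         * so that ceph_osdc_abort_on_full() can complete it with -ENOSPC
         * instead of leaving it blocked while the cluster or pool stays full.
         */
        req->r_abort_on_full = true;    /* field added by the parent commit */

        ret = ceph_osdc_start_request(osdc, req, false);
        if (!ret)
                ret = ceph_osdc_wait_request(osdc, req);
        if (ret == -ENOSPC)
                pr_warn("osd request aborted: cluster or pool is full\n");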
Diffstat (limited to 'net')
-rw-r--r--    net/ceph/osd_client.c    34
1 file changed, 34 insertions, 0 deletions
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 52a2019a2b64..55b7585ccefd 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1807,6 +1807,39 @@ static void abort_request(struct ceph_osd_request *req, int err)
         complete_request(req, err);
 }
 
+/*
+ * Drop all pending requests that are stalled waiting on a full condition to
+ * clear, and complete them with ENOSPC as the return code.
+ */
+static void ceph_osdc_abort_on_full(struct ceph_osd_client *osdc)
+{
+        struct rb_node *n;
+
+        dout("enter abort_on_full\n");
+
+        if (!ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) && !have_pool_full(osdc))
+                goto out;
+
+        for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
+                struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
+                struct rb_node *m;
+
+                m = rb_first(&osd->o_requests);
+                while (m) {
+                        struct ceph_osd_request *req = rb_entry(m,
+                                        struct ceph_osd_request, r_node);
+                        m = rb_next(m);
+
+                        if (req->r_abort_on_full &&
+                            (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
+                             pool_full(osdc, req->r_t.target_oloc.pool)))
+                                abort_request(req, -ENOSPC);
+                }
+        }
+out:
+        dout("return abort_on_full\n");
+}
+
 static void check_pool_dne(struct ceph_osd_request *req)
 {
         struct ceph_osd_client *osdc = req->r_osdc;
@@ -3265,6 +3298,7 @@ done:
         kick_requests(osdc, &need_resend, &need_resend_linger);
+        ceph_osdc_abort_on_full(osdc);
         ceph_monc_got_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
                           osdc->osdmap->epoch);
         up_write(&osdc->lock);
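A note on context, inferred from the hunk above: the new ceph_osdc_abort_on_full() call runs in the OSD map handler after kick_requests(), while osdc->lock is still held for write (it is only released by the up_write() a few lines later), so the helper can walk the osdc->osds and per-OSD o_requests trees without taking any additional locks.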