Login
[x]
Log in using an account from:
Fedora Account System
Red Hat Associate
Red Hat Customer
Or log in using a Red Hat Bugzilla account
Forgot Password
Login:
Hide Forgot
Create an Account
Red Hat Bugzilla – Attachment 314570 Details for
Bug 459337
dm-snap.c: Data read from snapshot may be corrupt if origin is being written to simultaneously
[?]
New
Simple Search
Advanced Search
My Links
Browse
Requests
Reports
Current State
Search
Tabular reports
Graphical reports
Duplicates
Other Reports
User Changes
Plotly Reports
Bug Status
Bug Severity
Non-Defaults
|
Product Dashboard
Help
Page Help!
Bug Writing Guidelines
What's new
Browser Support Policy
5.0.4.rh83 Release notes
FAQ
Guides index
User guide
Web Services
Contact
Legal
This site requires JavaScript to be enabled to function correctly, please enable it.
[patch]
PATCH 1/2 --- add I/O tracking infrastructure
linux-2.6.18-dm-snapshot-track-snapshot-reads.patch (text/plain), 7.05 KB, created by
Mikuláš Patočka
on 2008-08-19 20:59:47 UTC
(
hide
)
Description:
PATCH 1/2 --- add I/O tracking infrastructure
Filename:
MIME Type:
Creator:
Mikuláš Patočka
Created:
2008-08-19 20:59:47 UTC
Size:
7.05 KB
patch
obsolete
>upstream commit: cd45daffd1f7b53aac0835b23e97f814ec3f10dc (2.6.27-rc1) > >dm snapshot: track snapshot reads > >Whenever a snapshot read gets mapped through to the origin, track it in >a per-snapshot hash table indexed by chunk number, using memory allocated >from a new per-snapshot mempool. > >We need to track these reads to avoid race conditions which will be fixed >by patches that follow. > >diff -p -u -r linux-2.6.18.x86_64.p4/drivers/md/dm-snap.c linux-2.6.18.x86_64/drivers/md/dm-snap.c >--- linux-2.6.18.x86_64.p4/drivers/md/dm-snap.c 2008-08-18 14:06:38.000000000 +0200 >+++ linux-2.6.18.x86_64/drivers/md/dm-snap.c 2008-08-19 03:45:17.000000000 +0200 >@@ -39,6 +39,11 @@ > */ > #define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1) > >+/* >+ * The size of the mempool used to track chunks in use. >+ */ >+#define MIN_IOS 256 >+ > struct workqueue_struct *ksnapd; > static void flush_queued_bios(void *data); > >@@ -92,6 +97,42 @@ static kmem_cache_t *exception_cache; > static kmem_cache_t *pending_cache; > static mempool_t *pending_pool; > >+struct dm_snap_tracked_chunk { >+ struct hlist_node node; >+ chunk_t chunk; >+}; >+ >+static struct kmem_cache *tracked_chunk_cache; >+ >+static struct dm_snap_tracked_chunk *track_chunk(struct dm_snapshot *s, >+ chunk_t chunk) >+{ >+ struct dm_snap_tracked_chunk *c = mempool_alloc(s->tracked_chunk_pool, >+ GFP_NOIO); >+ unsigned long flags; >+ >+ c->chunk = chunk; >+ >+ spin_lock_irqsave(&s->tracked_chunk_lock, flags); >+ hlist_add_head(&c->node, >+ &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]); >+ spin_unlock_irqrestore(&s->tracked_chunk_lock, flags); >+ >+ return c; >+} >+ >+static void stop_tracking_chunk(struct dm_snapshot *s, >+ struct dm_snap_tracked_chunk *c) >+{ >+ unsigned long flags; >+ >+ spin_lock_irqsave(&s->tracked_chunk_lock, flags); >+ hlist_del(&c->node); >+ spin_unlock_irqrestore(&s->tracked_chunk_lock, flags); >+ >+ mempool_free(c, s->tracked_chunk_pool); >+} >+ > /* > * One of these per 
registered origin, held in the snapshot_origins hash > */ >@@ -491,6 +532,7 @@ static int set_chunk_size(struct dm_snap > static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) > { > struct dm_snapshot *s; >+ int i; > int r = -EINVAL; > char persistent; > char *origin_path; >@@ -573,11 +615,24 @@ static int snapshot_ctr(struct dm_target > goto bad5; > } > >+ s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS, >+ tracked_chunk_cache); >+ if (!s->tracked_chunk_pool) { >+ ti->error = "Could not allocate tracked_chunk mempool for " >+ "tracking reads"; >+ goto bad6; >+ } >+ >+ for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++) >+ INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]); >+ >+ spin_lock_init(&s->tracked_chunk_lock); >+ > /* Metadata must only be loaded into one table at once */ > r = s->store.read_metadata(&s->store); > if (r < 0) { > ti->error = "Failed to read snapshot metadata"; >- goto bad6; >+ goto bad_load_and_register; > } else if (r > 0) { > s->valid = 0; > DMWARN("Snapshot is marked invalid."); >@@ -591,7 +646,7 @@ static int snapshot_ctr(struct dm_target > if (register_snapshot(s)) { > r = -EINVAL; > ti->error = "Cannot register snapshot origin"; >- goto bad6; >+ goto bad_load_and_register; > } > > ti->private = s; >@@ -599,6 +654,9 @@ static int snapshot_ctr(struct dm_target > > return 0; > >+ bad_load_and_register: >+ mempool_destroy(s->tracked_chunk_pool); >+ > bad6: > kcopyd_client_destroy(s->kcopyd_client); > >@@ -622,6 +680,9 @@ static int snapshot_ctr(struct dm_target > > static void snapshot_dtr(struct dm_target *ti) > { >+#ifdef CONFIG_DM_DEBUG >+ int i; >+#endif > struct dm_snapshot *s = (struct dm_snapshot *) ti->private; > > flush_workqueue(ksnapd); >@@ -630,6 +691,13 @@ static void snapshot_dtr(struct dm_targe > /* After this returns there can be no new kcopyd jobs. 
*/ > unregister_snapshot(s); > >+#ifdef CONFIG_DM_DEBUG >+ for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++) >+ BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i])); >+#endif >+ >+ mempool_destroy(s->tracked_chunk_pool); >+ > kcopyd_client_destroy(s->kcopyd_client); > > exit_exception_table(&s->pending, pending_cache); >@@ -979,14 +1047,10 @@ static int snapshot_map(struct dm_target > start_copy(pe); > goto out; > } >- } else >- /* >- * FIXME: this read path scares me because we >- * always use the origin when we have a pending >- * exception. However I can't think of a >- * situation where this is wrong - ejt. >- */ >+ } else { > bio->bi_bdev = s->origin->bdev; >+ map_context->ptr = track_chunk(s, chunk); >+ } > > out_unlock: > up_write(&s->lock); >@@ -994,6 +1058,18 @@ static int snapshot_map(struct dm_target > return r; > } > >+static int snapshot_end_io(struct dm_target *ti, struct bio *bio, >+ int error, union map_info *map_context) >+{ >+ struct dm_snapshot *s = ti->private; >+ struct dm_snap_tracked_chunk *c = map_context->ptr; >+ >+ if (c) >+ stop_tracking_chunk(s, c); >+ >+ return 0; >+} >+ > static void snapshot_resume(struct dm_target *ti) > { > struct dm_snapshot *s = (struct dm_snapshot *) ti->private; >@@ -1274,6 +1350,7 @@ static struct target_type snapshot_targe > .ctr = snapshot_ctr, > .dtr = snapshot_dtr, > .map = snapshot_map, >+ .end_io = snapshot_end_io, > .resume = snapshot_resume, > .status = snapshot_status, > }; >@@ -1321,11 +1398,22 @@ static int __init dm_snapshot_init(void) > goto bad4; > } > >+ tracked_chunk_cache = >+ kmem_cache_create("dm-tracked-chunk", >+ sizeof(struct dm_snap_tracked_chunk), >+ __alignof__(struct dm_snap_tracked_chunk), >+ 0, NULL, NULL); >+ if (!tracked_chunk_cache) { >+ DMERR("Couldn't create cache to track chunks in use."); >+ r = -ENOMEM; >+ goto bad5; >+ } >+ > pending_pool = mempool_create_slab_pool(128, pending_cache); > if (!pending_pool) { > DMERR("Couldn't create pending pool."); > r = -ENOMEM; >- goto bad5; >+ 
goto bad_pending_pool; > } > > ksnapd = create_singlethread_workqueue("ksnapd"); >@@ -1339,6 +1427,8 @@ static int __init dm_snapshot_init(void) > > bad6: > mempool_destroy(pending_pool); >+ bad_pending_pool: >+ kmem_cache_destroy(tracked_chunk_cache); > bad5: > kmem_cache_destroy(pending_cache); > bad4: >@@ -1370,6 +1460,7 @@ static void __exit dm_snapshot_exit(void > mempool_destroy(pending_pool); > kmem_cache_destroy(pending_cache); > kmem_cache_destroy(exception_cache); >+ kmem_cache_destroy(tracked_chunk_cache); > } > > /* Module hooks */ >diff -p -u -r linux-2.6.18.x86_64.p4/drivers/md/dm-snap.h linux-2.6.18.x86_64/drivers/md/dm-snap.h >--- linux-2.6.18.x86_64.p4/drivers/md/dm-snap.h 2008-08-18 14:06:36.000000000 +0200 >+++ linux-2.6.18.x86_64/drivers/md/dm-snap.h 2008-08-19 03:02:07.000000000 +0200 >@@ -132,6 +132,10 @@ struct exception_store { > void *context; > }; > >+#define DM_TRACKED_CHUNK_HASH_SIZE 16 >+#define DM_TRACKED_CHUNK_HASH(x) ((unsigned long)(x) & \ >+ (DM_TRACKED_CHUNK_HASH_SIZE - 1)) >+ > struct dm_snapshot { > uint64_t features; > struct rw_semaphore lock; >@@ -177,6 +181,11 @@ struct dm_snapshot { > > /* The on disk metadata handler */ > struct exception_store store; >+ >+ /* Chunks with outstanding reads */ >+ mempool_t *tracked_chunk_pool; >+ spinlock_t tracked_chunk_lock; >+ struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE]; > }; > > /*
You cannot view the attachment while viewing its details because your browser does not support IFRAMEs.
View the attachment on a separate page
.
View Attachment As Diff
View Attachment As Raw
Actions:
View
|
Diff
Attachments on
bug 459337
: 314570 |
314571
|
314572
|
314573