Login
[x]
Log in using an account from:
Fedora Account System
Red Hat Associate
Red Hat Customer
Or log in using a Red Hat Bugzilla account
Forgot Password
Login:
Hide Forgot
Create an Account
Red Hat Bugzilla – Attachment 315493 Details for
Bug 460845
Nested LVM can cause deadlock due to kcopyd
[?]
New
Simple Search
Advanced Search
My Links
Browse
Requests
Reports
Current State
Search
Tabular reports
Graphical reports
Duplicates
Other Reports
User Changes
Plotly Reports
Bug Status
Bug Severity
Non-Defaults
|
Product Dashboard
Help
Page Help!
Bug Writing Guidelines
What's new
Browser Support Policy
5.0.4.rh83 Release notes
FAQ
Guides index
User guide
Web Services
Contact
Legal
This site requires JavaScript to be enabled to function correctly, please enable it.
[patch]
patch 1/2
01-dm-kcopyd-per-device-thread.patch (text/plain), 7.82 KB, created by
Mikuláš Patočka
on 2008-09-01 21:44:46 UTC
(
hide
)
Description:
patch 1/2
Filename:
MIME Type:
Creator:
Mikuláš Patočka
Created:
2008-09-01 21:44:46 UTC
Size:
7.82 KB
patch
obsolete
>diff -p -u -r linux-2.6.18.x86_64.orig/drivers/md/kcopyd.c linux-2.6.18.x86_64/drivers/md/kcopyd.c >--- linux-2.6.18.x86_64.orig/drivers/md/kcopyd.c 2008-09-01 21:42:53.000000000 +0200 >+++ linux-2.6.18.x86_64/drivers/md/kcopyd.c 2008-09-01 22:33:10.000000000 +0200 >@@ -25,14 +25,6 @@ > > #include "kcopyd.h" > >-static struct workqueue_struct *_kcopyd_wq; >-static struct work_struct _kcopyd_work; >- >-static inline void wake(void) >-{ >- queue_work(_kcopyd_wq, &_kcopyd_work); >-} >- > /*----------------------------------------------------------------- > * Each kcopyd client has its own little pool of preallocated > * pages for kcopyd io. >@@ -50,8 +42,30 @@ struct kcopyd_client { > #ifndef __GENKSYMS__ > struct dm_io_client *io_client; > #endif >+ >+ struct workqueue_struct *kcopyd_wq; >+ struct work_struct kcopyd_work; >+ >+/* >+ * We maintain three lists of jobs: >+ * >+ * i) jobs waiting for pages >+ * ii) jobs that have pages, and are waiting for the io to be issued. >+ * iii) jobs that have completed. >+ * >+ * All three of these are protected by job_lock. >+ */ >+ spinlock_t job_lock; >+ struct list_head complete_jobs; >+ struct list_head io_jobs; >+ struct list_head pages_jobs; > }; > >+static void wake(struct kcopyd_client *kc) >+{ >+ queue_work(kc->kcopyd_wq, &kc->kcopyd_work); >+} >+ > static struct page_list *alloc_pl(void) > { > struct page_list *pl; >@@ -209,21 +223,6 @@ struct kcopyd_job { > static kmem_cache_t *_job_cache; > static mempool_t *_job_pool; > >-/* >- * We maintain three lists of jobs: >- * >- * i) jobs waiting for pages >- * ii) jobs that have pages, and are waiting for the io to be issued. >- * iii) jobs that have completed. >- * >- * All three of these are protected by job_lock. 
>- */ >-static DEFINE_SPINLOCK(_job_lock); >- >-static LIST_HEAD(_complete_jobs); >-static LIST_HEAD(_io_jobs); >-static LIST_HEAD(_pages_jobs); >- > static int jobs_init(void) > { > _job_cache = kmem_cache_create("kcopyd-jobs", >@@ -244,10 +243,6 @@ static int jobs_init(void) > > static void jobs_exit(void) > { >- BUG_ON(!list_empty(&_complete_jobs)); >- BUG_ON(!list_empty(&_io_jobs)); >- BUG_ON(!list_empty(&_pages_jobs)); >- > mempool_destroy(_job_pool); > kmem_cache_destroy(_job_cache); > _job_pool = NULL; >@@ -258,18 +253,19 @@ static void jobs_exit(void) > * Functions to push and pop a job onto the head of a given job > * list. > */ >-static inline struct kcopyd_job *pop(struct list_head *jobs) >+static struct kcopyd_job *pop(struct list_head *jobs, >+ struct kcopyd_client *kc) > { > struct kcopyd_job *job = NULL; > unsigned long flags; > >- spin_lock_irqsave(&_job_lock, flags); >+ spin_lock_irqsave(&kc->job_lock, flags); > > if (!list_empty(jobs)) { > job = list_entry(jobs->next, struct kcopyd_job, list); > list_del(&job->list); > } >- spin_unlock_irqrestore(&_job_lock, flags); >+ spin_unlock_irqrestore(&kc->job_lock, flags); > > return job; > } >@@ -277,10 +273,11 @@ static inline struct kcopyd_job *pop(str > static inline void push(struct list_head *jobs, struct kcopyd_job *job) > { > unsigned long flags; >+ struct kcopyd_client *kc = job->kc; > >- spin_lock_irqsave(&_job_lock, flags); >+ spin_lock_irqsave(&kc->job_lock, flags); > list_add_tail(&job->list, jobs); >- spin_unlock_irqrestore(&_job_lock, flags); >+ spin_unlock_irqrestore(&kc->job_lock, flags); > } > > /* >@@ -313,6 +310,7 @@ static int run_complete_job(struct kcopy > static void complete_io(unsigned long error, void *context) > { > struct kcopyd_job *job = (struct kcopyd_job *) context; >+ struct kcopyd_client *kc = job->kc; > > if (error) { > if (job->rw == WRITE) >@@ -321,21 +319,21 @@ static void complete_io(unsigned long er > job->read_err = 1; > > if (!test_bit(KCOPYD_IGNORE_ERROR, 
&job->flags)) { >- push(&_complete_jobs, job); >- wake(); >+ push(&kc->complete_jobs, job); >+ wake(kc); > return; > } > } > > if (job->rw == WRITE) >- push(&_complete_jobs, job); >+ push(&kc->complete_jobs, job); > > else { > job->rw = WRITE; >- push(&_io_jobs, job); >+ push(&kc->io_jobs, job); > } > >- wake(); >+ wake(kc); > } > > /* >@@ -372,7 +370,7 @@ static int run_pages_job(struct kcopyd_j > r = kcopyd_get_pages(job->kc, job->nr_pages, &job->pages); > if (!r) { > /* this job is ready for io */ >- push(&_io_jobs, job); >+ push(&job->kc->io_jobs, job); > return 0; > } > >@@ -387,12 +385,13 @@ static int run_pages_job(struct kcopyd_j > * Run through a list for as long as possible. Returns the count > * of successful jobs. > */ >-static int process_jobs(struct list_head *jobs, int (*fn) (struct kcopyd_job *)) >+static int process_jobs(struct list_head *jobs, struct kcopyd_client *kc, >+ int (*fn) (struct kcopyd_job *)) > { > struct kcopyd_job *job; > int r, count = 0; > >- while ((job = pop(jobs))) { >+ while ((job = pop(jobs, kc))) { > > r = fn(job); > >@@ -402,7 +401,7 @@ static int process_jobs(struct list_head > job->write_err = (unsigned int) -1; > else > job->read_err = 1; >- push(&_complete_jobs, job); >+ push(&kc->complete_jobs, job); > break; > } > >@@ -424,8 +423,10 @@ static int process_jobs(struct list_head > /* > * kcopyd does this every time it's woken up. > */ >-static void do_work(void *ignored) >+static void do_work(void *data) > { >+ struct kcopyd_client *kc = data; >+ > /* > * The order that these are called is *very* important. > * complete jobs can free some pages for pages jobs. >@@ -433,9 +434,9 @@ static void do_work(void *ignored) > * list. io jobs call wake when they complete and it all > * starts again. 
> */ >- process_jobs(&_complete_jobs, run_complete_job); >- process_jobs(&_pages_jobs, run_pages_job); >- process_jobs(&_io_jobs, run_io_job); >+ process_jobs(&kc->complete_jobs, kc, run_complete_job); >+ process_jobs(&kc->pages_jobs, kc, run_pages_job); >+ process_jobs(&kc->io_jobs, kc, run_io_job); > } > > /* >@@ -445,9 +446,10 @@ static void do_work(void *ignored) > */ > static void dispatch_job(struct kcopyd_job *job) > { >- atomic_inc(&job->kc->nr_jobs); >- push(&_pages_jobs, job); >- wake(); >+ struct kcopyd_client *kc = job->kc; >+ atomic_inc(&kc->nr_jobs); >+ push(&kc->pages_jobs, job); >+ wake(kc); > } > > #define SUB_JOB_SIZE 128 >@@ -627,15 +629,7 @@ static int kcopyd_init(void) > return r; > } > >- _kcopyd_wq = create_singlethread_workqueue("kcopyd"); >- if (!_kcopyd_wq) { >- jobs_exit(); >- mutex_unlock(&kcopyd_init_lock); >- return -ENOMEM; >- } >- > kcopyd_clients++; >- INIT_WORK(&_kcopyd_work, do_work, NULL); > mutex_unlock(&kcopyd_init_lock); > return 0; > } >@@ -646,8 +640,6 @@ static void kcopyd_exit(void) > kcopyd_clients--; > if (!kcopyd_clients) { > jobs_exit(); >- destroy_workqueue(_kcopyd_wq); >- _kcopyd_wq = NULL; > } > mutex_unlock(&kcopyd_init_lock); > } >@@ -663,15 +655,31 @@ int kcopyd_client_create(unsigned int nr > > kc = kmalloc(sizeof(*kc), GFP_KERNEL); > if (!kc) { >+ r = -ENOMEM; > kcopyd_exit(); >- return -ENOMEM; >+ return r; > } > > spin_lock_init(&kc->lock); >+ spin_lock_init(&kc->job_lock); >+ INIT_LIST_HEAD(&kc->complete_jobs); >+ INIT_LIST_HEAD(&kc->io_jobs); >+ INIT_LIST_HEAD(&kc->pages_jobs); >+ >+ INIT_WORK(&kc->kcopyd_work, do_work, kc); >+ kc->kcopyd_wq = create_singlethread_workqueue("kcopyd"); >+ if (!kc->kcopyd_wq) { >+ r = -ENOMEM; >+ kfree(kc); >+ kcopyd_exit(); >+ return r; >+ } >+ > kc->pages = NULL; > kc->nr_pages = kc->nr_free_pages = 0; > r = client_alloc_pages(kc, nr_pages); > if (r) { >+ destroy_workqueue(kc->kcopyd_wq); > kfree(kc); > kcopyd_exit(); > return r; >@@ -681,6 +689,7 @@ int 
kcopyd_client_create(unsigned int nr > if (IS_ERR(kc->io_client)) { > r = PTR_ERR(kc->io_client); > client_free_pages(kc); >+ destroy_workqueue(kc->kcopyd_wq); > kfree(kc); > kcopyd_exit(); > return r; >@@ -699,6 +708,10 @@ void kcopyd_client_destroy(struct kcopyd > /* Wait for completion of all jobs submitted by this client. */ > wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs)); > >+ BUG_ON(!list_empty(&kc->complete_jobs)); >+ BUG_ON(!list_empty(&kc->io_jobs)); >+ BUG_ON(!list_empty(&kc->pages_jobs)); >+ destroy_workqueue(kc->kcopyd_wq); > dm_io_client_destroy(kc->io_client); > client_free_pages(kc); > client_del(kc);
You cannot view the attachment while viewing its details because your browser does not support IFRAMEs.
View the attachment on a separate page
.
View Attachment As Diff
View Attachment As Raw
Actions:
View
|
Diff
Attachments on
bug 460845
: 315493 |
315494