Login
[x]
Log in using an account from:
Fedora Account System
Red Hat Associate
Red Hat Customer
Or log in using a Red Hat Bugzilla account
Forgot Password
Login:
Hide Forgot
Create an Account
Red Hat Bugzilla – Attachment 295029 Details for
Bug 430938
RHEL5.2: Improved PowerNow! in Xen support on 2nd generation Opteron systems
[?]
New
Simple Search
Advanced Search
My Links
Browse
Requests
Reports
Current State
Search
Tabular reports
Graphical reports
Duplicates
Other Reports
User Changes
Plotly Reports
Bug Status
Bug Severity
Non-Defaults
|
Product Dashboard
Help
Page Help!
Bug Writing Guidelines
What's new
Browser Support Policy
5.0.4.rh83 Release notes
FAQ
Guides index
User guide
Web Services
Contact
Legal
This site requires JavaScript to be enabled to function correctly, please enable it.
[patch]
add vcpu lock/unlock functions
RHEL5.2_new_vcpu_lock_affinity.patch (text/plain), 4.51 KB, created by
Bhavna Sarathy
on 2008-02-15 18:41:20 UTC
(
hide
)
Description:
add vcpu lock/unlock functions
Filename:
MIME Type:
Creator:
Bhavna Sarathy
Created:
2008-02-15 18:41:20 UTC
Size:
4.51 KB
patch
obsolete
>Resolves BZ 430938 > >Introduce new vcpu_lock_affinity() and vcpu_unlock_affinity() helper >functions for use by x86's continue_hypercall_on_cpu(). > >This has two advantages: >1. We can lock out ordinary vcpu_set_affinity() commands from dom0. >2. We avoid the (in this case bogus) check for dom0_vcpus_pin. > > >--- xen/include/xen/sched.h.lockorig 2008-02-06 16:31:14.000000000 -0500 >+++ xen/include/xen/sched.h 2008-02-06 16:32:09.000000000 -0500 >@@ -119,6 +119,8 @@ struct vcpu > bool_t defer_shutdown; > /* VCPU is paused following shutdown request (d->is_shutting_down)? */ > bool_t paused_for_shutdown; >+ /* VCPU affinity is temporarily locked from controller changes? */ >+ bool_t affinity_locked; > > unsigned long pause_flags; > atomic_t pause_count; >@@ -476,6 +478,8 @@ void cpu_init(void); > > void vcpu_force_reschedule(struct vcpu *v); > int vcpu_set_affinity(struct vcpu *v, cpumask_t *affinity); >+int vcpu_lock_affinity(struct vcpu *v, cpumask_t *affinity); >+void vcpu_unlock_affinity(struct vcpu *v, cpumask_t *affinity); > > void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate); > >--- xen/arch/x86/domain.c.lockorig 2008-02-06 16:30:44.000000000 -0500 >+++ xen/arch/x86/domain.c 2008-02-06 16:35:27.000000000 -0500 >@@ -1355,6 +1355,7 @@ static void continue_hypercall_on_cpu_he > { > struct cpu_user_regs *regs = guest_cpu_user_regs(); > struct migrate_info *info = v->arch.continue_info; >+ cpumask_t mask = info->saved_affinity; > > regs->eax = info->func(info->data); > >@@ -1363,7 +1364,7 @@ static void continue_hypercall_on_cpu_he > > xfree(info); > >- vcpu_set_affinity(v, &v->cpu_affinity); >+ vcpu_unlock_affinity(v, &mask); > schedule_tail(v); > } > >@@ -1371,7 +1372,6 @@ int continue_hypercall_on_cpu(int cpu, l > { > struct vcpu *v = current; > struct migrate_info *info; >- cpumask_t mask = cpumask_of_cpu(cpu); > int rc; > > if ( cpu == smp_processor_id() ) >@@ -1384,12 +1384,12 @@ int continue_hypercall_on_cpu(int cpu, l > 
info->func = func; > info->data = data; > info->saved_schedule_tail = v->arch.schedule_tail; >- info->saved_affinity = v->cpu_affinity; >+ info->saved_affinity = cpumask_of_cpu(cpu); > > v->arch.schedule_tail = continue_hypercall_on_cpu_helper; > v->arch.continue_info = info; > >- rc = vcpu_set_affinity(v, &mask); >+ rc = vcpu_lock_affinity(v, &info->saved_affinity); > if ( rc ) > { > v->arch.schedule_tail = info->saved_schedule_tail; >--- xen/common/schedule.c.lockorig 2008-02-06 16:31:00.000000000 -0500 >+++ xen/common/schedule.c 2008-02-06 16:37:37.000000000 -0500 >@@ -264,12 +264,11 @@ void vcpu_force_reschedule(struct vcpu * > } > } > >-int vcpu_set_affinity(struct vcpu *v, cpumask_t *affinity) >+static int __vcpu_set_affinity( >+ struct vcpu *v, cpumask_t *affinity, >+ bool_t old_lock_status, bool_t new_lock_status) > { >- cpumask_t online_affinity; >- >- if ( (v->domain->domain_id == 0) && opt_dom0_vcpus_pin ) >- return -EINVAL; >+ cpumask_t online_affinity, old_affinity; > > cpus_and(online_affinity, *affinity, cpu_online_map); > if ( cpus_empty(online_affinity) ) >@@ -277,7 +276,18 @@ int vcpu_set_affinity(struct vcpu *v, cp > > vcpu_schedule_lock_irq(v); > >+ if ( v->affinity_locked != old_lock_status ) >+ { >+ BUG_ON(!v->affinity_locked); >+ vcpu_schedule_unlock_irq(v); >+ return -EBUSY; >+ } >+ >+ v->affinity_locked = new_lock_status; >+ >+ old_affinity = v->cpu_affinity; > v->cpu_affinity = *affinity; >+ *affinity = old_affinity; > if ( !cpu_isset(v->processor, v->cpu_affinity) ) > set_bit(_VPF_migrating, &v->pause_flags); > >@@ -292,6 +302,31 @@ int vcpu_set_affinity(struct vcpu *v, cp > return 0; > } > >+int vcpu_set_affinity(struct vcpu *v, cpumask_t *affinity) >+{ >+ if ( (v->domain->domain_id == 0) && opt_dom0_vcpus_pin ) >+ return -EINVAL; >+ return __vcpu_set_affinity(v, affinity, 0, 0); >+} >+ >+int vcpu_lock_affinity(struct vcpu *v, cpumask_t *affinity) >+{ >+ return __vcpu_set_affinity(v, affinity, 0, 1); >+} >+ >+void 
vcpu_unlock_affinity(struct vcpu *v, cpumask_t *affinity) >+{ >+ cpumask_t online_affinity; >+ >+ /* Do not fail if no CPU in old affinity mask is online. */ >+ cpus_and(online_affinity, *affinity, cpu_online_map); >+ if ( cpus_empty(online_affinity) ) >+ *affinity = cpu_online_map; >+ >+ if ( __vcpu_set_affinity(v, affinity, 1, 0) != 0 ) >+ BUG(); >+} >+ > /* Block the currently-executing domain until a pertinent event occurs. */ > static long do_block(void) > {
You cannot view the attachment while viewing its details because your browser does not support IFRAMEs.
View the attachment on a separate page
.
View Attachment As Diff
View Attachment As Raw
Actions:
View
|
Diff
Attachments on
bug 430938
:
295028
| 295029 |
295030
|
295031