Diffstat (limited to '0019-xen-sched-introduce-cpupool_update_node_affinity.patch')
-rw-r--r-- | 0019-xen-sched-introduce-cpupool_update_node_affinity.patch | 257
1 file changed, 257 insertions, 0 deletions
diff --git a/0019-xen-sched-introduce-cpupool_update_node_affinity.patch b/0019-xen-sched-introduce-cpupool_update_node_affinity.patch
new file mode 100644
index 0000000..957d0fe
--- /dev/null
+++ b/0019-xen-sched-introduce-cpupool_update_node_affinity.patch
@@ -0,0 +1,257 @@
+From d4e971ad12dd27913dffcf96b5de378ea7b476e1 Mon Sep 17 00:00:00 2001
+From: Juergen Gross <jgross@suse.com>
+Date: Tue, 11 Oct 2022 14:59:40 +0200
+Subject: [PATCH 19/26] xen/sched: introduce cpupool_update_node_affinity()
+
+For updating the node affinities of all domains in a cpupool add a new
+function cpupool_update_node_affinity().
+
+In order to avoid multiple allocations of cpumasks carve out memory
+allocation and freeing from domain_update_node_affinity() into new
+helpers, which can be used by cpupool_update_node_affinity().
+
+Modify domain_update_node_affinity() to take an additional parameter
+for passing the allocated memory in and to allocate and free the memory
+via the new helpers in case NULL was passed.
+
+This will help later to pre-allocate the cpumasks in order to avoid
+allocations in stop-machine context.
+
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Tested-by: Andrew Cooper <andrew.cooper3@citrix.com>
+master commit: a83fa1e2b96ace65b45dde6954d67012633a082b
+master date: 2022-09-05 11:42:30 +0100
+---
+ xen/common/sched/core.c    | 54 ++++++++++++++++++++++++++------------
+ xen/common/sched/cpupool.c | 39 +++++++++++++++------------
+ xen/common/sched/private.h |  7 +++++
+ xen/include/xen/sched.h    |  9 ++++++-
+ 4 files changed, 74 insertions(+), 35 deletions(-)
+
+diff --git a/xen/common/sched/core.c b/xen/common/sched/core.c
+index f07bd2681fcb..065a83eca912 100644
+--- a/xen/common/sched/core.c
++++ b/xen/common/sched/core.c
+@@ -1824,9 +1824,28 @@ int vcpu_affinity_domctl(struct domain *d, uint32_t cmd,
+     return ret;
+ }
+ 
+-void domain_update_node_affinity(struct domain *d)
++bool alloc_affinity_masks(struct affinity_masks *affinity)
+ {
+-    cpumask_var_t dom_cpumask, dom_cpumask_soft;
++    if ( !alloc_cpumask_var(&affinity->hard) )
++        return false;
++    if ( !alloc_cpumask_var(&affinity->soft) )
++    {
++        free_cpumask_var(affinity->hard);
++        return false;
++    }
++
++    return true;
++}
++
++void free_affinity_masks(struct affinity_masks *affinity)
++{
++    free_cpumask_var(affinity->soft);
++    free_cpumask_var(affinity->hard);
++}
++
++void domain_update_node_aff(struct domain *d, struct affinity_masks *affinity)
++{
++    struct affinity_masks masks;
+     cpumask_t *dom_affinity;
+     const cpumask_t *online;
+     struct sched_unit *unit;
+@@ -1836,14 +1855,16 @@ void domain_update_node_affinity(struct domain *d)
+     if ( !d->vcpu || !d->vcpu[0] )
+         return;
+ 
+-    if ( !zalloc_cpumask_var(&dom_cpumask) )
+-        return;
+-    if ( !zalloc_cpumask_var(&dom_cpumask_soft) )
++    if ( !affinity )
+     {
+-        free_cpumask_var(dom_cpumask);
+-        return;
++        affinity = &masks;
++        if ( !alloc_affinity_masks(affinity) )
++            return;
+     }
+ 
++    cpumask_clear(affinity->hard);
++    cpumask_clear(affinity->soft);
++
+     online = cpupool_domain_master_cpumask(d);
+ 
+     spin_lock(&d->node_affinity_lock);
+@@ -1864,22 +1885,21 @@ void domain_update_node_affinity(struct domain *d)
+      */
+     for_each_sched_unit ( d, unit )
+     {
+-        cpumask_or(dom_cpumask, dom_cpumask, unit->cpu_hard_affinity);
+-        cpumask_or(dom_cpumask_soft, dom_cpumask_soft,
+-                   unit->cpu_soft_affinity);
++        cpumask_or(affinity->hard, affinity->hard, unit->cpu_hard_affinity);
++        cpumask_or(affinity->soft, affinity->soft, unit->cpu_soft_affinity);
+     }
+     /* Filter out non-online cpus */
+-    cpumask_and(dom_cpumask, dom_cpumask, online);
+-    ASSERT(!cpumask_empty(dom_cpumask));
++    cpumask_and(affinity->hard, affinity->hard, online);
++    ASSERT(!cpumask_empty(affinity->hard));
+     /* And compute the intersection between hard, online and soft */
+-    cpumask_and(dom_cpumask_soft, dom_cpumask_soft, dom_cpumask);
++    cpumask_and(affinity->soft, affinity->soft, affinity->hard);
+ 
+     /*
+      * If not empty, the intersection of hard, soft and online is the
+      * narrowest set we want. If empty, we fall back to hard&online.
+      */
+-    dom_affinity = cpumask_empty(dom_cpumask_soft) ?
+-                       dom_cpumask : dom_cpumask_soft;
++    dom_affinity = cpumask_empty(affinity->soft) ? affinity->hard
++                                                 : affinity->soft;
+ 
+     nodes_clear(d->node_affinity);
+     for_each_cpu ( cpu, dom_affinity )
+@@ -1888,8 +1908,8 @@ void domain_update_node_affinity(struct domain *d)
+ 
+     spin_unlock(&d->node_affinity_lock);
+ 
+-    free_cpumask_var(dom_cpumask_soft);
+-    free_cpumask_var(dom_cpumask);
++    if ( affinity == &masks )
++        free_affinity_masks(affinity);
+ }
+ 
+ typedef long ret_t;
+diff --git a/xen/common/sched/cpupool.c b/xen/common/sched/cpupool.c
+index 8c6e6eb9ccd5..45b6ff99561a 100644
+--- a/xen/common/sched/cpupool.c
++++ b/xen/common/sched/cpupool.c
+@@ -401,6 +401,25 @@ int cpupool_move_domain(struct domain *d, struct cpupool *c)
+     return ret;
+ }
+ 
++/* Update affinities of all domains in a cpupool. */
++static void cpupool_update_node_affinity(const struct cpupool *c)
++{
++    struct affinity_masks masks;
++    struct domain *d;
++
++    if ( !alloc_affinity_masks(&masks) )
++        return;
++
++    rcu_read_lock(&domlist_read_lock);
++
++    for_each_domain_in_cpupool(d, c)
++        domain_update_node_aff(d, &masks);
++
++    rcu_read_unlock(&domlist_read_lock);
++
++    free_affinity_masks(&masks);
++}
++
+ /*
+  * assign a specific cpu to a cpupool
+  * cpupool_lock must be held
+@@ -408,7 +427,6 @@ int cpupool_move_domain(struct domain *d, struct cpupool *c)
+ static int cpupool_assign_cpu_locked(struct cpupool *c, unsigned int cpu)
+ {
+     int ret;
+-    struct domain *d;
+     const cpumask_t *cpus;
+ 
+     cpus = sched_get_opt_cpumask(c->gran, cpu);
+@@ -433,12 +451,7 @@ static int cpupool_assign_cpu_locked(struct cpupool *c, unsigned int cpu)
+ 
+     rcu_read_unlock(&sched_res_rculock);
+ 
+-    rcu_read_lock(&domlist_read_lock);
+-    for_each_domain_in_cpupool(d, c)
+-    {
+-        domain_update_node_affinity(d);
+-    }
+-    rcu_read_unlock(&domlist_read_lock);
++    cpupool_update_node_affinity(c);
+ 
+     return 0;
+ }
+@@ -447,18 +460,14 @@ static int cpupool_unassign_cpu_finish(struct cpupool *c)
+ {
+     int cpu = cpupool_moving_cpu;
+     const cpumask_t *cpus;
+-    struct domain *d;
+     int ret;
+ 
+     if ( c != cpupool_cpu_moving )
+         return -EADDRNOTAVAIL;
+ 
+-    /*
+-     * We need this for scanning the domain list, both in
+-     * cpu_disable_scheduler(), and at the bottom of this function.
+-     */
+     rcu_read_lock(&domlist_read_lock);
+     ret = cpu_disable_scheduler(cpu);
++    rcu_read_unlock(&domlist_read_lock);
+ 
+     rcu_read_lock(&sched_res_rculock);
+     cpus = get_sched_res(cpu)->cpus;
+@@ -485,11 +494,7 @@ static int cpupool_unassign_cpu_finish(struct cpupool *c)
+     }
+     rcu_read_unlock(&sched_res_rculock);
+ 
+-    for_each_domain_in_cpupool(d, c)
+-    {
+-        domain_update_node_affinity(d);
+-    }
+-    rcu_read_unlock(&domlist_read_lock);
++    cpupool_update_node_affinity(c);
+ 
+     return ret;
+ }
+diff --git a/xen/common/sched/private.h b/xen/common/sched/private.h
+index a870320146ef..2b04b01a0c0a 100644
+--- a/xen/common/sched/private.h
++++ b/xen/common/sched/private.h
+@@ -593,6 +593,13 @@ affinity_balance_cpumask(const struct sched_unit *unit, int step,
+         cpumask_copy(mask, unit->cpu_hard_affinity);
+ }
+ 
++struct affinity_masks {
++    cpumask_var_t hard;
++    cpumask_var_t soft;
++};
++
++bool alloc_affinity_masks(struct affinity_masks *affinity);
++void free_affinity_masks(struct affinity_masks *affinity);
+ void sched_rm_cpu(unsigned int cpu);
+ const cpumask_t *sched_get_opt_cpumask(enum sched_gran opt, unsigned int cpu);
+ void schedule_dump(struct cpupool *c);
+diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
+index 9671062360ac..3f4225738a40 100644
+--- a/xen/include/xen/sched.h
++++ b/xen/include/xen/sched.h
+@@ -655,8 +655,15 @@ static inline void get_knownalive_domain(struct domain *d)
+     ASSERT(!(atomic_read(&d->refcnt) & DOMAIN_DESTROYED));
+ }
+ 
++struct affinity_masks;
++
+ int domain_set_node_affinity(struct domain *d, const nodemask_t *affinity);
+-void domain_update_node_affinity(struct domain *d);
++void domain_update_node_aff(struct domain *d, struct affinity_masks *affinity);
++
++static inline void domain_update_node_affinity(struct domain *d)
++{
++    domain_update_node_aff(d, NULL);
++}
+ 
+ /*
+  * To be implemented by each architecture, sanity checking the configuration
+-- 
+2.37.3
+
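As a standalone illustration of the pattern the commit message describes — allocate one pair of scratch masks per cpupool operation, reuse it for every domain in the pool, and keep the old allocate-on-each-call behaviour when NULL is passed — here is a minimal, hypothetical userspace sketch. The names (mask_pair, update_one_domain, update_pool) and the calloc/free based masks are illustrative stand-ins, not Xen's cpumask_var_t/affinity API.

/*
 * Minimal userspace sketch of the allocate-once-and-reuse pattern.
 * All types and helpers here are hypothetical analogues of the patch's
 * alloc_affinity_masks()/free_affinity_masks()/domain_update_node_aff().
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NR_CPUS 64

typedef struct { unsigned long bits[NR_CPUS / 64]; } cpumask;

struct mask_pair {
    cpumask *hard;
    cpumask *soft;
};

static bool alloc_mask_pair(struct mask_pair *m)
{
    m->hard = calloc(1, sizeof(*m->hard));
    m->soft = calloc(1, sizeof(*m->soft));
    if ( !m->hard || !m->soft )
    {
        free(m->hard);
        free(m->soft);
        return false;
    }
    return true;
}

static void free_mask_pair(struct mask_pair *m)
{
    free(m->soft);
    free(m->hard);
}

/* Stand-in for domain_update_node_aff(): uses caller's masks if provided. */
static void update_one_domain(int domid, struct mask_pair *scratch)
{
    struct mask_pair local;

    if ( !scratch )
    {
        scratch = &local;
        if ( !alloc_mask_pair(scratch) )
            return;
    }

    /* Masks may hold a previous domain's bits, so clear them before reuse. */
    memset(scratch->hard, 0, sizeof(*scratch->hard));
    memset(scratch->soft, 0, sizeof(*scratch->soft));

    printf("updated node affinity for domain %d\n", domid);

    if ( scratch == &local )
        free_mask_pair(scratch);
}

/* Stand-in for cpupool_update_node_affinity(): one allocation for the loop. */
static void update_pool(const int *domids, int n)
{
    struct mask_pair scratch;

    if ( !alloc_mask_pair(&scratch) )
        return;

    for ( int i = 0; i < n; i++ )
        update_one_domain(domids[i], &scratch);

    free_mask_pair(&scratch);
}

int main(void)
{
    int pool[] = { 0, 1, 5 };

    update_pool(pool, 3);       /* masks allocated once, reused three times */
    update_one_domain(7, NULL); /* old behaviour: allocate and free locally */
    return 0;
}

Note the clearing step: because the masks are now reused across calls instead of being freshly zeroed by zalloc_cpumask_var(), the patch adds explicit cpumask_clear() calls at the top of domain_update_node_aff(), which the sketch mirrors with memset().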