@@ -177,17 +177,6 @@ static inline void parse_dt_topology(void) {}
 static inline void update_cpu_capacity(unsigned int cpuid) {}
 #endif
 
-/*
- * cpu topology table
- */
-struct cputopo_arm cpu_topology[NR_CPUS];
-EXPORT_SYMBOL_GPL(cpu_topology);
-
-const struct cpumask *cpu_coregroup_mask(int cpu)
-{
-	return &cpu_topology[cpu].core_sibling;
-}
-
 /*
  * The current assumption is that we can power gate each core independently.
  * This will be superseded by DT binding once available.
@@ -197,40 +186,14 @@ const struct cpumask *cpu_corepower_mask(int cpu)
 	return &cpu_topology[cpu].thread_sibling;
 }
 
-static void update_siblings_masks(unsigned int cpuid)
-{
-	struct cputopo_arm *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
-	int cpu;
-
-	/* update core and thread sibling masks */
-	for_each_possible_cpu(cpu) {
-		cpu_topo = &cpu_topology[cpu];
-
-		if (cpuid_topo->socket_id != cpu_topo->socket_id)
-			continue;
-
-		cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
-		if (cpu != cpuid)
-			cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
-
-		if (cpuid_topo->core_id != cpu_topo->core_id)
-			continue;
-
-		cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
-		if (cpu != cpuid)
-			cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
-	}
-	smp_wmb();
-}
-
 /*
  * store_cpu_topology is called at boot when only one cpu is running
  * and with the mutex cpu_hotplug.lock locked, when several cpus have booted,
  * which prevents simultaneous write access to cpu_topology array
  */
 void store_cpu_topology(unsigned int cpuid)
 {
-	struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid];
+	struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
 	unsigned int mpidr;
 
 	/* If the cpu topology has been already set, just return */
@@ -250,12 +213,12 @@ void store_cpu_topology(unsigned int cpuid)
 			/* core performance interdependency */
 			cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
 			cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
-			cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
+			cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
 		} else {
 			/* largely independent cores */
 			cpuid_topo->thread_id = -1;
 			cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
-			cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+			cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
 		}
 	} else {
 		/*
@@ -265,7 +228,7 @@ void store_cpu_topology(unsigned int cpuid)
 		 */
 		cpuid_topo->thread_id = -1;
 		cpuid_topo->core_id = 0;
-		cpuid_topo->socket_id = -1;
+		cpuid_topo->package_id = -1;
 	}
 
 	update_siblings_masks(cpuid);
@@ -275,7 +238,7 @@ void store_cpu_topology(unsigned int cpuid)
 	pr_info("CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n",
 		cpuid, cpu_topology[cpuid].thread_id,
 		cpu_topology[cpuid].core_id,
-		cpu_topology[cpuid].socket_id, mpidr);
+		cpu_topology[cpuid].package_id, mpidr);
 }
 
 static inline int cpu_corepower_flags(void)
@@ -298,18 +261,7 @@ static struct sched_domain_topology_level arm_topology[] = {
  */
 void __init init_cpu_topology(void)
 {
-	unsigned int cpu;
-
-	/* init core mask and capacity */
-	for_each_possible_cpu(cpu) {
-		struct cputopo_arm *cpu_topo = &(cpu_topology[cpu]);
-
-		cpu_topo->thread_id = -1;
-		cpu_topo->core_id = -1;
-		cpu_topo->socket_id = -1;
-		cpumask_clear(&cpu_topo->core_sibling);
-		cpumask_clear(&cpu_topo->thread_sibling);
-	}
+	reset_cpu_topology();
 	smp_wmb();
 
 	parse_dt_topology();
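
For readers following the store_cpu_topology() hunks above: the thread/core/package IDs are simply the MPIDR affinity fields. Below is a minimal, self-contained sketch (plain user-space C, not part of the commit) that decodes a 32-bit ARM MPIDR the same way, assuming the usual layout of Aff0/Aff1/Aff2 in bits [7:0], [15:8], [23:16] and the MT flag in bit 24; the helper names and the sample value are illustrative only.

#include <stdio.h>
#include <stdint.h>

/* Illustrative re-implementation of the MPIDR helpers: each affinity
 * level is an 8-bit field, Aff0 at bits [7:0], Aff1 at [15:8],
 * Aff2 at [23:16]; bit 24 is the MT flag on 32-bit ARM. */
#define MPIDR_LEVEL_BITS	8
#define MPIDR_LEVEL_MASK	((1u << MPIDR_LEVEL_BITS) - 1)
#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
	(((mpidr) >> (MPIDR_LEVEL_BITS * (level))) & MPIDR_LEVEL_MASK)
#define MPIDR_MT_BIT		(1u << 24)

struct cpu_ids {
	int thread_id;
	int core_id;
	int package_id;
};

/* Mirrors the mapping used in store_cpu_topology() above. */
static struct cpu_ids decode_mpidr(uint32_t mpidr)
{
	struct cpu_ids ids;

	if (mpidr & MPIDR_MT_BIT) {
		/* SMT part: Aff0 = thread, Aff1 = core, Aff2 = package */
		ids.thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
		ids.core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
		ids.package_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
	} else {
		/* largely independent cores: Aff0 = core, Aff1 = package */
		ids.thread_id = -1;
		ids.core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
		ids.package_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	}
	return ids;
}

int main(void)
{
	/* hypothetical MPIDR: cluster 1, core 2, MT bit clear */
	struct cpu_ids ids = decode_mpidr(0x80000102u);

	printf("thread %d, core %d, package %d\n",
	       ids.thread_id, ids.core_id, ids.package_id);
	return 0;
}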
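Likewise, the open-coded update_siblings_masks() and the init loop removed above are now supplied by the generic arch_topology code. The sketch below (again illustrative user-space C, not the kernel implementation) reproduces the grouping rule shown in the removed hunk: CPUs sharing a package_id become core siblings, and CPUs also sharing a core_id become thread siblings. Entries are first reset to -1, as reset_cpu_topology() does, so CPUs whose topology has not been stored yet never match.

#include <stdio.h>

#define NR_CPUS 4

/* Simplified stand-in for struct cpu_topology: sibling masks are plain
 * 32-bit words here instead of cpumask_t. */
struct topo {
	int core_id;
	int package_id;
	unsigned int core_sibling;	/* CPUs in the same package */
	unsigned int thread_sibling;	/* CPUs in the same core */
};

static struct topo cpu_topo[NR_CPUS];

/* Same grouping rule as the removed update_siblings_masks(). */
static void update_siblings(int cpuid)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		if (cpu_topo[cpu].package_id != cpu_topo[cpuid].package_id)
			continue;

		cpu_topo[cpu].core_sibling |= 1u << cpuid;
		cpu_topo[cpuid].core_sibling |= 1u << cpu;

		if (cpu_topo[cpu].core_id != cpu_topo[cpuid].core_id)
			continue;

		cpu_topo[cpu].thread_sibling |= 1u << cpuid;
		cpu_topo[cpuid].thread_sibling |= 1u << cpu;
	}
}

int main(void)
{
	/* mirror reset_cpu_topology(): mark every CPU's IDs as unknown */
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		cpu_topo[cpu].core_id = -1;
		cpu_topo[cpu].package_id = -1;
	}

	/* bring-up order: each CPU stores its IDs, then updates siblings;
	 * sample layout: two packages with two cores each, no SMT */
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		cpu_topo[cpu].core_id = cpu % 2;
		cpu_topo[cpu].package_id = cpu / 2;
		update_siblings(cpu);
	}

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d: core_sibling=0x%x thread_sibling=0x%x\n",
		       cpu, cpu_topo[cpu].core_sibling,
		       cpu_topo[cpu].thread_sibling);
	return 0;
}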