@@ -333,105 +333,6 @@ __visible void smp_kvm_posted_intr_nested_ipi(struct pt_regs *regs)
333 333
334 334
335 335 #ifdef CONFIG_HOTPLUG_CPU
336-
337- /* These two declarations are only used in check_irq_vectors_for_cpu_disable()
338- * below, which is protected by stop_machine(). Putting them on the stack
339- * results in a stack frame overflow. Dynamically allocating could result in a
340- * failure so declare these two cpumasks as global.
341- */
342- static struct cpumask affinity_new, online_new;
343-
344- /*
345- * This cpu is going to be removed and its vectors migrated to the remaining
346- * online cpus. Check to see if there are enough vectors in the remaining cpus.
347- * This function is protected by stop_machine().
348- */
349- int check_irq_vectors_for_cpu_disable(void)
350- {
351- 	unsigned int this_cpu, vector, this_count, count;
352- 	struct irq_desc *desc;
353- 	struct irq_data *data;
354- 	int cpu;
355-
356- 	this_cpu = smp_processor_id();
357- 	cpumask_copy(&online_new, cpu_online_mask);
358- 	cpumask_clear_cpu(this_cpu, &online_new);
359-
360- 	this_count = 0;
361- 	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
362- 		desc = __this_cpu_read(vector_irq[vector]);
363- 		if (IS_ERR_OR_NULL(desc))
364- 			continue;
365- 		/*
366- 		 * Protect against concurrent action removal, affinity
367- 		 * changes etc.
368- 		 */
369- 		raw_spin_lock(&desc->lock);
370- 		data = irq_desc_get_irq_data(desc);
371- 		cpumask_copy(&affinity_new,
372- 			     irq_data_get_affinity_mask(data));
373- 		cpumask_clear_cpu(this_cpu, &affinity_new);
374-
375- 		/* Do not count inactive or per-cpu irqs. */
376- 		if (!irq_desc_has_action(desc) || irqd_is_per_cpu(data)) {
377- 			raw_spin_unlock(&desc->lock);
378- 			continue;
379- 		}
380-
381- 		raw_spin_unlock(&desc->lock);
382- 		/*
383- 		 * A single irq may be mapped to multiple cpu's
384- 		 * vector_irq[] (for example IOAPIC cluster mode). In
385- 		 * this case we have two possibilities:
386- 		 *
387- 		 * 1) the resulting affinity mask is empty; that is
388- 		 * this the down'd cpu is the last cpu in the irq's
389- 		 * affinity mask, or
390- 		 *
391- 		 * 2) the resulting affinity mask is no longer a
392- 		 * subset of the online cpus but the affinity mask is
393- 		 * not zero; that is the down'd cpu is the last online
394- 		 * cpu in a user set affinity mask.
395- 		 */
396- 		if (cpumask_empty(&affinity_new) ||
397- 		    !cpumask_subset(&affinity_new, &online_new))
398- 			this_count++;
399- 	}
400- 	/* No need to check any further. */
401- 	if (!this_count)
402- 		return 0;
403-
404- 	count = 0;
405- 	for_each_online_cpu(cpu) {
406- 		if (cpu == this_cpu)
407- 			continue;
408- 		/*
409- 		 * We scan from FIRST_EXTERNAL_VECTOR to first system
410- 		 * vector. If the vector is marked in the used vectors
411- 		 * bitmap or an irq is assigned to it, we don't count
412- 		 * it as available.
413- 		 *
414- 		 * As this is an inaccurate snapshot anyway, we can do
415- 		 * this w/o holding vector_lock.
416- 		 */
417- 		for (vector = FIRST_EXTERNAL_VECTOR;
418- 		     vector < FIRST_SYSTEM_VECTOR; vector++) {
419- 			if (!test_bit(vector, system_vectors) &&
420- 			    IS_ERR_OR_NULL(per_cpu(vector_irq, cpu)[vector])) {
421- 				if (++count == this_count)
422- 					return 0;
423- 			}
424- 		}
425- 	}
426-
427- 	if (count < this_count) {
428- 		pr_warn("CPU %d disable failed: CPU has %u vectors assigned and there are only %u available.\n",
429- 			this_cpu, this_count, count);
430- 		return -ERANGE;
431- 	}
432- 	return 0;
433- }
434-
435 336 /* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
436 337 void fixup_irqs(void)
437 338 {
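
For readers who want the gist of the deleted helper without reading the whole hunk: it first computed this_count, the number of vectors on the outgoing CPU whose affinity mask would be left without an online CPU, and then scanned the remaining online CPUs for at least that many free external vectors. Below is an editor's sketch of that second, counting step as a standalone user-space program. It is not part of this commit; the names can_disable_cpu, vector_used and cpu_is_online are hypothetical stand-ins for vector_irq, system_vectors and cpu_online_mask.

/*
 * Editor's sketch, not kernel code: a minimal model of the free-vector
 * capacity check performed by the removed check_irq_vectors_for_cpu_disable().
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_MODEL_CPUS		4
#define FIRST_EXTERNAL_VECTOR	0x20
#define FIRST_SYSTEM_VECTOR	0xec

/* vector_used[cpu][vec] == true means that vector is already claimed. */
static bool vector_used[NR_MODEL_CPUS][FIRST_SYSTEM_VECTOR];
static bool cpu_is_online[NR_MODEL_CPUS] = { true, true, true, true };

/*
 * 'needed' plays the role of this_count: the number of interrupts that must
 * be re-targeted when 'dying_cpu' goes down. Return true if the remaining
 * online CPUs have at least that many free external vectors.
 */
static bool can_disable_cpu(int dying_cpu, unsigned int needed)
{
	unsigned int free = 0;

	if (!needed)
		return true;

	for (int cpu = 0; cpu < NR_MODEL_CPUS; cpu++) {
		if (cpu == dying_cpu || !cpu_is_online[cpu])
			continue;
		for (int vec = FIRST_EXTERNAL_VECTOR; vec < FIRST_SYSTEM_VECTOR; vec++) {
			/* Stop as soon as enough free slots have been seen. */
			if (!vector_used[cpu][vec] && ++free == needed)
				return true;
		}
	}
	return false;
}

int main(void)
{
	/* Pretend the outgoing CPU 3 owns two interrupts only it can service. */
	printf("CPU 3 can go offline: %s\n",
	       can_disable_cpu(3, 2) ? "yes" : "no");
	return 0;
}

As in the removed code, the scan bails out as soon as enough free vectors are found; an exact total is never needed, only whether the threshold is reached.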