@@ -4453,7 +4453,8 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 	unsigned long i, j;
 
 	/* protect against switching io scheduler */
-	mutex_lock(&q->sysfs_lock);
+	lockdep_assert_held(&q->sysfs_lock);
+
 	for (i = 0; i < set->nr_hw_queues; i++) {
 		int old_node;
 		int node = blk_mq_get_hctx_node(set, i);
@@ -4486,7 +4487,6 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 
 	xa_for_each_start(&q->hctx_table, j, hctx, j)
 		blk_mq_exit_hctx(q, set, hctx, j);
-	mutex_unlock(&q->sysfs_lock);
 
 	/* unregister cpuhp callbacks for exited hctxs */
 	blk_mq_remove_hw_queues_cpuhp(q);
@@ -4518,10 +4518,14 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 
 	xa_init(&q->hctx_table);
 
+	mutex_lock(&q->sysfs_lock);
+
 	blk_mq_realloc_hw_ctxs(set, q);
 	if (!q->nr_hw_queues)
 		goto err_hctxs;
 
+	mutex_unlock(&q->sysfs_lock);
+
 	INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
 	blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
 
@@ -4540,6 +4544,7 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	return 0;
 
 err_hctxs:
+	mutex_unlock(&q->sysfs_lock);
 	blk_mq_release(q);
 err_exit:
 	q->mq_ops = NULL;
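The hunks above all apply one pattern: blk_mq_realloc_hw_ctxs() stops taking q->sysfs_lock itself and only asserts it with lockdep_assert_held(), while its caller blk_mq_init_allocated_queue() now acquires the lock around the call and also releases it on the err_hctxs error path. A minimal sketch of that caller-holds-the-lock pattern, using hypothetical names (struct my_queue, realloc_resources, init_queue) rather than the real blk-mq symbols:

#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>

struct my_queue {
	struct mutex sysfs_lock;
	int nr_hw_queues;
};

/* Callee: no longer locks, it only documents that the caller holds the lock. */
static void realloc_resources(struct my_queue *q)
{
	lockdep_assert_held(&q->sysfs_lock);
	/* ... reallocate per-queue resources here ... */
}

/* Caller: owns the lock across the call and drops it on both exit paths. */
static int init_queue(struct my_queue *q)
{
	mutex_lock(&q->sysfs_lock);

	realloc_resources(q);
	if (!q->nr_hw_queues)
		goto err;

	mutex_unlock(&q->sysfs_lock);
	return 0;

err:
	/* mirrors the unlock added on the err_hctxs path above */
	mutex_unlock(&q->sysfs_lock);
	return -ENOMEM;
}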
@@ -4920,12 +4925,12 @@ static bool blk_mq_elv_switch_none(struct list_head *head,
 		return false;
 
 	/* q->elevator needs protection from ->sysfs_lock */
-	mutex_lock(&q->sysfs_lock);
+	lockdep_assert_held(&q->sysfs_lock);
 
 	/* the check has to be done with holding sysfs_lock */
 	if (!q->elevator) {
 		kfree(qe);
-		goto unlock;
+		goto out;
 	}
 
 	INIT_LIST_HEAD(&qe->node);
@@ -4935,9 +4940,7 @@ static bool blk_mq_elv_switch_none(struct list_head *head,
 	__elevator_get(qe->type);
 	list_add(&qe->node, head);
 	elevator_disable(q);
-unlock:
-	mutex_unlock(&q->sysfs_lock);
-
+out:
 	return true;
 }
 
@@ -4966,11 +4969,9 @@ static void blk_mq_elv_switch_back(struct list_head *head,
 	list_del(&qe->node);
 	kfree(qe);
 
-	mutex_lock(&q->sysfs_lock);
 	elevator_switch(q, t);
 	/* drop the reference acquired in blk_mq_elv_switch_none */
 	elevator_put(t);
-	mutex_unlock(&q->sysfs_lock);
 }
 
 static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
@@ -4990,8 +4991,11 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 	if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues)
 		return;
 
-	list_for_each_entry(q, &set->tag_list, tag_set_list)
+	list_for_each_entry(q, &set->tag_list, tag_set_list) {
+		mutex_lock(&q->sysfs_dir_lock);
+		mutex_lock(&q->sysfs_lock);
 		blk_mq_freeze_queue(q);
+	}
 	/*
 	 * Switch IO scheduler to 'none', cleaning up the data associated
 	 * with the previous scheduler. We will switch back once we are done
@@ -5047,8 +5051,11 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 	list_for_each_entry(q, &set->tag_list, tag_set_list)
 		blk_mq_elv_switch_back(&head, q);
 
-	list_for_each_entry(q, &set->tag_list, tag_set_list)
+	list_for_each_entry(q, &set->tag_list, tag_set_list) {
 		blk_mq_unfreeze_queue(q);
+		mutex_unlock(&q->sysfs_lock);
+		mutex_unlock(&q->sysfs_dir_lock);
+	}
 
 	/* Free the excess tags when nr_hw_queues shrink. */
 	for (i = set->nr_hw_queues; i < prev_nr_hw_queues; i++)
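In the last two hunks, __blk_mq_update_nr_hw_queues() now takes q->sysfs_dir_lock and then q->sysfs_lock for every queue on the tag list before freezing it, and drops both in the reverse order after unfreezing, so the elevator switch helpers above can rely on sysfs_lock already being held. A sketch of that acquire/freeze and unfreeze/release ordering over a list of queues, again with hypothetical names (struct my_queue, freeze_all, unfreeze_all):

#include <linux/list.h>
#include <linux/mutex.h>

struct my_queue {
	struct mutex sysfs_dir_lock;
	struct mutex sysfs_lock;
	struct list_head node;
};

static void freeze_all(struct list_head *queue_list)
{
	struct my_queue *q;

	/* Acquire order: sysfs_dir_lock, then sysfs_lock, then freeze. */
	list_for_each_entry(q, queue_list, node) {
		mutex_lock(&q->sysfs_dir_lock);
		mutex_lock(&q->sysfs_lock);
		/* the freeze call (blk_mq_freeze_queue() in the real code) goes here */
	}
}

static void unfreeze_all(struct list_head *queue_list)
{
	struct my_queue *q;

	/* Unfreeze first, then release in the reverse order of acquisition. */
	list_for_each_entry(q, queue_list, node) {
		/* the unfreeze call (blk_mq_unfreeze_queue() in the real code) goes here */
		mutex_unlock(&q->sysfs_lock);
		mutex_unlock(&q->sysfs_dir_lock);
	}
}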