@@ -122,7 +122,10 @@ void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
 
 void blk_freeze_queue_start(struct request_queue *q)
 {
+	int sub_class;
+
 	mutex_lock(&q->mq_freeze_lock);
+	sub_class = q->mq_freeze_depth;
 	if (++q->mq_freeze_depth == 1) {
 		percpu_ref_kill(&q->q_usage_counter);
 		mutex_unlock(&q->mq_freeze_lock);
@@ -131,6 +134,12 @@ void blk_freeze_queue_start(struct request_queue *q)
 	} else {
 		mutex_unlock(&q->mq_freeze_lock);
 	}
+	/*
+	 * model as down_write_trylock() so that two concurrent freeze queue
+	 * can be allowed
+	 */
+	if (blk_queue_freeze_lockdep(q))
+		rwsem_acquire(&q->q_usage_counter_map, sub_class, 1, _RET_IP_);
 }
 EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
 
@@ -188,6 +197,9 @@ void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
 		wake_up_all(&q->mq_freeze_wq);
 	}
 	mutex_unlock(&q->mq_freeze_lock);
+
+	if (blk_queue_freeze_lockdep(q))
+		rwsem_release(&q->q_usage_counter_map, _RET_IP_);
 }
 
 void blk_mq_unfreeze_queue(struct request_queue *q)
@@ -4241,6 +4253,9 @@ void blk_mq_destroy_queue(struct request_queue *q)
 	blk_queue_start_drain(q);
 	blk_mq_freeze_queue_wait(q);
 
+	/* counter pair of acquire in blk_queue_start_drain */
+	if (blk_queue_freeze_lockdep(q))
+		rwsem_release(&q->q_usage_counter_map, _RET_IP_);
 	blk_sync_queue(q);
 	blk_mq_cancel_work_sync(q);
 	blk_mq_exit_queue(q);
0 commit comments