@@ -1816,19 +1816,16 @@ static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
 static void ufshcd_ungate_work(struct work_struct *work)
 {
 	int ret;
-	unsigned long flags;
 	struct ufs_hba *hba = container_of(work, struct ufs_hba,
 			clk_gating.ungate_work);
 
 	cancel_delayed_work_sync(&hba->clk_gating.gate_work);
 
-	spin_lock_irqsave(hba->host->host_lock, flags);
-	if (hba->clk_gating.state == CLKS_ON) {
-		spin_unlock_irqrestore(hba->host->host_lock, flags);
-		return;
+	scoped_guard(spinlock_irqsave, &hba->clk_gating.lock) {
+		if (hba->clk_gating.state == CLKS_ON)
+			return;
 	}
 
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
 	ufshcd_hba_vreg_set_hpm(hba);
 	ufshcd_setup_clocks(hba, true);
 
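The scoped_guard() above comes from the kernel's scope-based cleanup helpers in <linux/cleanup.h>: the guard takes the named lock at the top of the braced scope and releases it on every path out of that scope, including the early return, which is what lets both explicit unlock calls disappear. A minimal sketch of the semantics, using a hypothetical my_dev type in place of struct ufs_hba:

#include <linux/cleanup.h>
#include <linux/spinlock.h>

struct my_dev {
	spinlock_t lock;
	int state;	/* 1 == clocks on */
};

static void my_ungate(struct my_dev *dev)
{
	scoped_guard(spinlock_irqsave, &dev->lock) {
		if (dev->state == 1)
			return;	/* the guard drops the lock on this path too */
	}
	/* and on the normal exit from the scope */
}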
@@ -1863,7 +1860,7 @@ void ufshcd_hold(struct ufs_hba *hba)
 	if (!ufshcd_is_clkgating_allowed(hba) ||
 	    !hba->clk_gating.is_initialized)
 		return;
-	spin_lock_irqsave(hba->host->host_lock, flags);
+	spin_lock_irqsave(&hba->clk_gating.lock, flags);
 	hba->clk_gating.active_reqs++;
 
 start:
@@ -1879,11 +1876,11 @@ void ufshcd_hold(struct ufs_hba *hba)
 		 */
 		if (ufshcd_can_hibern8_during_gating(hba) &&
 		    ufshcd_is_link_hibern8(hba)) {
-			spin_unlock_irqrestore(hba->host->host_lock, flags);
+			spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
 			flush_result = flush_work(&hba->clk_gating.ungate_work);
 			if (hba->clk_gating.is_suspended && !flush_result)
 				return;
-			spin_lock_irqsave(hba->host->host_lock, flags);
+			spin_lock_irqsave(&hba->clk_gating.lock, flags);
 			goto start;
 		}
 		break;
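The unlock/flush/relock sequence above is deliberate: the ungate worker now takes clk_gating.lock itself, so flush_work() must not be called with that lock held or the wait would deadlock, and the goto start re-evaluates the gating state because it may have changed while the lock was dropped. A sketch of the idiom, with illustrative names:

#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct my_dev {
	spinlock_t lock;
	struct work_struct ungate_work;
	int state;	/* 1 == clocks on */
};

static void my_wait_until_on(struct my_dev *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
start:
	if (dev->state != 1) {
		/*
		 * The worker takes dev->lock itself; flushing it while
		 * holding dev->lock would deadlock.
		 */
		spin_unlock_irqrestore(&dev->lock, flags);
		flush_work(&dev->ungate_work);
		spin_lock_irqsave(&dev->lock, flags);
		goto start;	/* state may have changed while unlocked */
	}
	spin_unlock_irqrestore(&dev->lock, flags);
}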
@@ -1912,48 +1909,50 @@ void ufshcd_hold(struct ufs_hba *hba)
 		 */
 		fallthrough;
 	case REQ_CLKS_ON:
-		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
 		flush_work(&hba->clk_gating.ungate_work);
 		/* Make sure state is CLKS_ON before returning */
-		spin_lock_irqsave(hba->host->host_lock, flags);
+		spin_lock_irqsave(&hba->clk_gating.lock, flags);
 		goto start;
 	default:
 		dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
 			__func__, hba->clk_gating.state);
 		break;
 	}
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
 }
 EXPORT_SYMBOL_GPL(ufshcd_hold);
 
 static void ufshcd_gate_work(struct work_struct *work)
 {
 	struct ufs_hba *hba = container_of(work, struct ufs_hba,
 			clk_gating.gate_work.work);
-	unsigned long flags;
 	int ret;
 
-	spin_lock_irqsave(hba->host->host_lock, flags);
-	/*
-	 * In case you are here to cancel this work the gating state
-	 * would be marked as REQ_CLKS_ON. In this case save time by
-	 * skipping the gating work and exit after changing the clock
-	 * state to CLKS_ON.
-	 */
-	if (hba->clk_gating.is_suspended ||
-	    (hba->clk_gating.state != REQ_CLKS_OFF)) {
-		hba->clk_gating.state = CLKS_ON;
-		trace_ufshcd_clk_gating(dev_name(hba->dev),
-					hba->clk_gating.state);
-		goto rel_lock;
-	}
+	scoped_guard(spinlock_irqsave, &hba->clk_gating.lock) {
+		/*
+		 * In case you are here to cancel this work the gating state
+		 * would be marked as REQ_CLKS_ON. In this case save time by
+		 * skipping the gating work and exit after changing the clock
+		 * state to CLKS_ON.
+		 */
+		if (hba->clk_gating.is_suspended ||
+		    hba->clk_gating.state != REQ_CLKS_OFF) {
+			hba->clk_gating.state = CLKS_ON;
+			trace_ufshcd_clk_gating(dev_name(hba->dev),
+						hba->clk_gating.state);
+			return;
+		}
 
-	if (ufshcd_is_ufs_dev_busy(hba) ||
-	    hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
-	    hba->clk_gating.active_reqs)
-		goto rel_lock;
+		if (hba->clk_gating.active_reqs)
+			return;
+	}
 
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	scoped_guard(spinlock_irqsave, hba->host->host_lock) {
+		if (ufshcd_is_ufs_dev_busy(hba) ||
+		    hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
+			return;
+	}
 
 	/* put the link into hibern8 mode before turning off clocks */
 	if (ufshcd_can_hibern8_during_gating(hba)) {
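ufshcd_gate_work() now splits what used to be one host_lock critical section into two: the clk_gating fields (is_suspended, state, active_reqs) are checked under the new clk_gating.lock, while the host-level conditions (ufshcd_is_ufs_dev_busy(), ufshcd_state) stay under host_lock. The two checks are no longer atomic with respect to each other; presumably the final re-check of REQ_CLKS_OFF before committing to CLKS_OFF, later in this function, is what closes that window. A sketch of the split-lock shape, with illustrative names:

#include <linux/cleanup.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct my_dev {
	spinlock_t gating_lock;
	spinlock_t host_lock;
	bool is_suspended;
	int active_reqs;
	bool host_busy;
};

static bool my_may_gate(struct my_dev *dev)
{
	/* Fields owned by the new gating lock. */
	scoped_guard(spinlock_irqsave, &dev->gating_lock) {
		if (dev->is_suspended || dev->active_reqs)
			return false;
	}

	/* Fields still owned by the host lock. */
	scoped_guard(spinlock_irqsave, &dev->host_lock) {
		if (dev->host_busy)
			return false;
	}

	/*
	 * No lock is held here: either condition may change again, so
	 * the caller must re-check under the gating lock before acting.
	 */
	return true;
}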
@@ -1964,7 +1963,7 @@ static void ufshcd_gate_work(struct work_struct *work)
 				__func__, ret);
 			trace_ufshcd_clk_gating(dev_name(hba->dev),
 						hba->clk_gating.state);
-			goto out;
+			return;
 		}
 		ufshcd_set_link_hibern8(hba);
 	}
@@ -1984,32 +1983,34 @@
 	 * prevent from doing cancel work multiple times when there are
 	 * new requests arriving before the current cancel work is done.
 	 */
-	spin_lock_irqsave(hba->host->host_lock, flags);
+	guard(spinlock_irqsave)(&hba->clk_gating.lock);
 	if (hba->clk_gating.state == REQ_CLKS_OFF) {
 		hba->clk_gating.state = CLKS_OFF;
 		trace_ufshcd_clk_gating(dev_name(hba->dev),
 					hba->clk_gating.state);
 	}
-rel_lock:
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
-out:
-	return;
 }
 
-/* host lock must be held before calling this variant */
 static void __ufshcd_release(struct ufs_hba *hba)
 {
+	lockdep_assert_held(&hba->clk_gating.lock);
+
 	if (!ufshcd_is_clkgating_allowed(hba))
 		return;
 
 	hba->clk_gating.active_reqs--;
 
 	if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended ||
-	    hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
-	    ufshcd_has_pending_tasks(hba) || !hba->clk_gating.is_initialized ||
+	    !hba->clk_gating.is_initialized ||
 	    hba->clk_gating.state == CLKS_OFF)
 		return;
 
+	scoped_guard(spinlock_irqsave, hba->host->host_lock) {
+		if (ufshcd_has_pending_tasks(hba) ||
+		    hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
+			return;
+	}
+
 	hba->clk_gating.state = REQ_CLKS_OFF;
 	trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
 	queue_delayed_work(hba->clk_gating.clk_gating_workq,
@@ -2019,11 +2020,8 @@ static void __ufshcd_release(struct ufs_hba *hba)
 
 void ufshcd_release(struct ufs_hba *hba)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(hba->host->host_lock, flags);
+	guard(spinlock_irqsave)(&hba->clk_gating.lock);
 	__ufshcd_release(hba);
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ufshcd_release);
 
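Two guard flavors are in play here: scoped_guard() covers only its braced block, while guard(spinlock_irqsave)(&...) as used in ufshcd_release() holds the lock from that statement until the end of the function. The lockdep_assert_held() added to __ufshcd_release() turns the old "host lock must be held" comment into a contract lockdep can enforce at runtime. A minimal sketch of the locked-wrapper idiom, with hypothetical names:

#include <linux/cleanup.h>
#include <linux/lockdep.h>
#include <linux/spinlock.h>

struct my_dev {
	spinlock_t lock;
	int active_reqs;
};

/* Caller must hold dev->lock; lockdep checks it when enabled. */
static void __my_release(struct my_dev *dev)
{
	lockdep_assert_held(&dev->lock);
	dev->active_reqs--;
}

void my_release(struct my_dev *dev)
{
	guard(spinlock_irqsave)(&dev->lock);	/* held until function return */
	__my_release(dev);
}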
@@ -2038,11 +2036,9 @@ static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
 void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value)
 {
 	struct ufs_hba *hba = dev_get_drvdata(dev);
-	unsigned long flags;
 
-	spin_lock_irqsave(hba->host->host_lock, flags);
+	guard(spinlock_irqsave)(&hba->clk_gating.lock);
 	hba->clk_gating.delay_ms = value;
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ufshcd_clkgate_delay_set);
 
@@ -2070,26 +2066,25 @@ static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t count)
 {
 	struct ufs_hba *hba = dev_get_drvdata(dev);
-	unsigned long flags;
 	u32 value;
 
 	if (kstrtou32(buf, 0, &value))
 		return -EINVAL;
 
 	value = !!value;
 
-	spin_lock_irqsave(hba->host->host_lock, flags);
+	guard(spinlock_irqsave)(&hba->clk_gating.lock);
+
 	if (value == hba->clk_gating.is_enabled)
-		goto out;
+		return count;
 
 	if (value)
 		__ufshcd_release(hba);
 	else
 		hba->clk_gating.active_reqs++;
 
 	hba->clk_gating.is_enabled = value;
-out:
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
 	return count;
 }
 
@@ -2131,6 +2126,8 @@ static void ufshcd_init_clk_gating(struct ufs_hba *hba)
 	INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
 	INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
 
+	spin_lock_init(&hba->clk_gating.lock);
+
 	hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(
 		"ufs_clk_gating_%d", WQ_MEM_RECLAIM | WQ_HIGHPRI,
 		hba->host->host_no);
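spin_lock_init() has to run before either work item can fire, hence its placement in ufshcd_init_clk_gating() next to the work initializers. The struct side of the change falls outside this section; presumably include/ufs/ufshcd.h gains a spinlock_t member, roughly as in this illustrative subset of struct ufs_clk_gating (field types approximated, only fields this patch touches are shown):

#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct ufs_clk_gating_sketch {
	struct delayed_work gate_work;
	struct work_struct ungate_work;
	spinlock_t lock;	/* new: protects state, active_reqs, delay_ms, ... */
	int state;
	unsigned long delay_ms;
	bool is_suspended;
	bool is_enabled;
	bool is_initialized;
	int active_reqs;
	struct workqueue_struct *clk_gating_workq;
};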
@@ -9126,7 +9123,6 @@ static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
 	int ret = 0;
 	struct ufs_clk_info *clki;
 	struct list_head *head = &hba->clk_list_head;
-	unsigned long flags;
 	ktime_t start = ktime_get();
 	bool clk_state_changed = false;
 
@@ -9177,11 +9173,10 @@ static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
 				clk_disable_unprepare(clki->clk);
 		}
 	} else if (!ret && on) {
-		spin_lock_irqsave(hba->host->host_lock, flags);
-		hba->clk_gating.state = CLKS_ON;
+		scoped_guard(spinlock_irqsave, &hba->clk_gating.lock)
+			hba->clk_gating.state = CLKS_ON;
 		trace_ufshcd_clk_gating(dev_name(hba->dev),
 					hba->clk_gating.state);
-		spin_unlock_irqrestore(hba->host->host_lock, flags);
 	}
 
 	if (clk_state_changed)
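The last hunk uses the braceless form: scoped_guard() expands to a for statement, so like if or while it can govern exactly one statement without braces. Only the state assignment runs under clk_gating.lock; the unchanged trace call then reads the state back without the lock, as the context lines show. A minimal sketch, with a hypothetical my_dev as before:

#include <linux/cleanup.h>
#include <linux/printk.h>
#include <linux/spinlock.h>

struct my_dev {
	spinlock_t lock;
	int state;	/* 1 == clocks on */
};

static void my_mark_clks_on(struct my_dev *dev)
{
	/* A single guarded statement: no braces required. */
	scoped_guard(spinlock_irqsave, &dev->lock)
		dev->state = 1;

	/* Read back outside the lock, mirroring the hunk above. */
	pr_debug("state=%d\n", dev->state);
}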