 
 struct rmid_entry {
         u32                             rmid;
-        atomic_t                        busy;
+        int                             busy;
         struct list_head                list;
 };
 
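The switch of busy from atomic_t to a plain int is safe because, after this patch, the field is only written by add_rmid_to_limbo() and by the limbo worker: the worker takes rdtgroup_mutex explicitly in cqm_handle_limbo() below, and the allocator side already asserts that mutex (see the lockdep_assert_held() in alloc_rmid()), so the old IPI-time concurrency that forced the atomic is gone. A one-line sketch of the new serialization assumption (illustrative, not part of the patch):

    /* All writers of entry->busy now run under rdtgroup_mutex. */
    lockdep_assert_held(&rdtgroup_mutex);
    entry->busy++;          /* was: atomic_set()/atomic_dec_and_test() */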
@@ -45,13 +45,13 @@ struct rmid_entry {
 static LIST_HEAD(rmid_free_lru);
 
 /**
- * @rmid_limbo_lru       list of currently unused but (potentially)
+ * @rmid_limbo_count     count of currently unused but (potentially)
  *     dirty RMIDs.
- *     This list contains RMIDs that no one is currently using but that
+ *     This counts RMIDs that no one is currently using but that
  *     may have an occupancy value > intel_cqm_threshold. User can change
  *     the threshold occupancy value.
  */
-static LIST_HEAD(rmid_limbo_lru);
+unsigned int rmid_limbo_count;
 
 /**
  * @rmid_entry - The entry in the limbo and free lists.
@@ -103,124 +103,53 @@ static u64 __rmid_read(u32 rmid, u32 eventid)
         return val;
 }
 
-/*
- * Walk the limbo list looking at any RMIDs that are flagged in the
- * domain rmid_busy_llc bitmap as busy. If the reported LLC occupancy
- * is below the threshold clear the busy bit and decrement the count.
- * If the busy count gets to zero on an RMID we stop looking.
- * This can be called from an IPI.
- * We need an atomic for the busy count because multiple CPUs may check
- * the same RMID at the same time.
- */
-static bool __check_limbo(struct rdt_domain *d)
-{
-        struct rmid_entry *entry;
-        u64 val;
-
-        list_for_each_entry(entry, &rmid_limbo_lru, list) {
-                if (!test_bit(entry->rmid, d->rmid_busy_llc))
-                        continue;
-                val = __rmid_read(entry->rmid, QOS_L3_OCCUP_EVENT_ID);
-                if (val <= intel_cqm_threshold) {
-                        clear_bit(entry->rmid, d->rmid_busy_llc);
-                        if (atomic_dec_and_test(&entry->busy))
-                                return true;
-                }
-        }
-        return false;
-}
-
-static void check_limbo(void *arg)
+static bool rmid_dirty(struct rmid_entry *entry)
 {
-        struct rdt_domain *d;
-
-        d = get_domain_from_cpu(smp_processor_id(),
-                                &rdt_resources_all[RDT_RESOURCE_L3]);
-
-        if (d)
-                __check_limbo(d);
-}
+        u64 val = __rmid_read(entry->rmid, QOS_L3_OCCUP_EVENT_ID);
 
-static bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d)
-{
-        return find_first_bit(d->rmid_busy_llc, r->num_rmid) != r->num_rmid;
+        return val >= intel_cqm_threshold;
 }
 
 /*
- * Scan the limbo list and move all entries that are below the
- * intel_cqm_threshold to the free list.
- * Return "true" if the limbo list is empty, "false" if there are
- * still some RMIDs there.
+ * Check the RMIDs that are marked as busy for this domain. If the
+ * reported LLC occupancy is below the threshold, clear the busy bit
+ * and decrement the count. If the busy count reaches zero on an RMID,
+ * we free the RMID.
  */
-static bool try_freeing_limbo_rmid(void)
+void __check_limbo(struct rdt_domain *d, bool force_free)
 {
-        struct rmid_entry *entry, *tmp;
+        struct rmid_entry *entry;
         struct rdt_resource *r;
-        cpumask_var_t cpu_mask;
-        struct rdt_domain *d;
-        bool ret = true;
-        int cpu;
-
-        if (list_empty(&rmid_limbo_lru))
-                return ret;
+        u32 crmid = 1, nrmid;
 
         r = &rdt_resources_all[RDT_RESOURCE_L3];
 
-        cpu = get_cpu();
-
         /*
-         * First see if we can free up an RMID by checking busy values
-         * on the local package.
+         * Skip RMID 0 and start from RMID 1: check all RMIDs that are
+         * marked busy for occupancy < threshold. If the occupancy is
+         * less than the threshold, decrement the RMID's busy counter
+         * and move it to the free list when the counter reaches 0.
          */
-        d = get_domain_from_cpu(cpu, r);
-        if (d && has_busy_rmid(r, d) && __check_limbo(d)) {
-                list_for_each_entry_safe(entry, tmp, &rmid_limbo_lru, list) {
-                        if (atomic_read(&entry->busy) == 0) {
-                                list_del(&entry->list);
+        for (;;) {
+                nrmid = find_next_bit(d->rmid_busy_llc, r->num_rmid, crmid);
+                if (nrmid >= r->num_rmid)
+                        break;
+
+                entry = __rmid_entry(nrmid);
+                if (force_free || !rmid_dirty(entry)) {
+                        clear_bit(entry->rmid, d->rmid_busy_llc);
+                        if (!--entry->busy) {
+                                rmid_limbo_count--;
                                 list_add_tail(&entry->list, &rmid_free_lru);
-                                goto done;
                         }
                 }
+                crmid = nrmid + 1;
         }
+}
 
-        if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL)) {
-                ret = false;
-                goto done;
-        }
-
-        /*
-         * Build a mask of other domains that have busy RMIDs
-         */
-        list_for_each_entry(d, &r->domains, list) {
-                if (!cpumask_test_cpu(cpu, &d->cpu_mask) &&
-                    has_busy_rmid(r, d))
-                        cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
-        }
-        if (cpumask_empty(cpu_mask)) {
-                ret = false;
-                goto free_mask;
-        }
-
-        /*
-         * Scan domains with busy RMIDs to check if they still are busy
-         */
-        on_each_cpu_mask(cpu_mask, check_limbo, NULL, true);
-
-        /* Walk limbo list moving all free RMIDs to the &rmid_free_lru list */
-        list_for_each_entry_safe(entry, tmp, &rmid_limbo_lru, list) {
-                if (atomic_read(&entry->busy) != 0) {
-                        ret = false;
-                        continue;
-                }
-                list_del(&entry->list);
-                list_add_tail(&entry->list, &rmid_free_lru);
-        }
-
-free_mask:
-        free_cpumask_var(cpu_mask);
-done:
-        put_cpu();
-        return ret;
+bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d)
+{
+        return find_first_bit(d->rmid_busy_llc, r->num_rmid) != r->num_rmid;
 }
 
 /*
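The rewritten __check_limbo() scans the per-domain rmid_busy_llc bitmap with find_next_bit() instead of walking a global limbo list, so a domain only ever inspects its own dirty RMIDs. A minimal, self-contained userspace sketch of the same scan shape, where find_next_set() and the toy bitmap are illustrative stand-ins for the kernel's find_next_bit() and rmid_busy_llc:

    #include <stdio.h>

    /* Toy stand-in for find_next_bit(): index of the first set bit at
     * or after 'start', or 'nbits' if no further bit is set. */
    static unsigned int find_next_set(unsigned long map, unsigned int nbits,
                                      unsigned int start)
    {
            for (unsigned int i = start; i < nbits; i++)
                    if (map & (1UL << i))
                            return i;
            return nbits;
    }

    int main(void)
    {
            unsigned long busy = 0x2c;      /* bits 2, 3 and 5 set */
            unsigned int nbits = 8 * sizeof(busy);
            unsigned int i = 1;             /* skip bit 0, like RMID 0 */

            for (;;) {              /* same loop shape as __check_limbo() */
                    i = find_next_set(busy, nbits, i);
                    if (i >= nbits)
                            break;
                    printf("RMID %u is still busy\n", i);
                    i++;
            }
            return 0;
    }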
@@ -231,15 +160,11 @@ static bool try_freeing_limbo_rmid(void)
 int alloc_rmid(void)
 {
         struct rmid_entry *entry;
-        bool ret;
 
         lockdep_assert_held(&rdtgroup_mutex);
 
-        if (list_empty(&rmid_free_lru)) {
-                ret = try_freeing_limbo_rmid();
-                if (list_empty(&rmid_free_lru))
-                        return ret ? -ENOSPC : -EBUSY;
-        }
+        if (list_empty(&rmid_free_lru))
+                return rmid_limbo_count ? -EBUSY : -ENOSPC;
 
         entry = list_first_entry(&rmid_free_lru,
                                  struct rmid_entry, list);
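With the limbo list gone, alloc_rmid() derives its failure mode from the bare counter: -EBUSY means RMIDs exist but are still draining and may be freed by the per-domain worker shortly, while -ENOSPC means every RMID is genuinely allocated. The old path tried to force-drain remote packages via IPI before giving up; the new one simply reports -EBUSY and leaves the draining to the worker. A hypothetical caller sketch (not part of this patch) of how the two codes differ:

    int rmid = alloc_rmid();

    if (rmid == -EBUSY) {
            /* RMIDs are in limbo; the limbo worker may free one
             * within CQM_LIMBOCHECK_INTERVAL, so retry later. */
    } else if (rmid == -ENOSPC) {
            /* Hard failure: all RMIDs are in active use. */
    }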
@@ -252,29 +177,35 @@ static void add_rmid_to_limbo(struct rmid_entry *entry)
 {
         struct rdt_resource *r;
         struct rdt_domain *d;
-        int cpu, nbusy = 0;
+        int cpu;
         u64 val;
 
         r = &rdt_resources_all[RDT_RESOURCE_L3];
 
+        entry->busy = 0;
         cpu = get_cpu();
         list_for_each_entry(d, &r->domains, list) {
                 if (cpumask_test_cpu(cpu, &d->cpu_mask)) {
                         val = __rmid_read(entry->rmid, QOS_L3_OCCUP_EVENT_ID);
                         if (val <= intel_cqm_threshold)
                                 continue;
                 }
+
+                /*
+                 * For the first limbo RMID in the domain,
+                 * set up the limbo worker.
+                 */
+                if (!has_busy_rmid(r, d))
+                        cqm_setup_limbo_handler(d, CQM_LIMBOCHECK_INTERVAL);
                 set_bit(entry->rmid, d->rmid_busy_llc);
-                nbusy++;
+                entry->busy++;
         }
         put_cpu();
 
-        if (nbusy) {
-                atomic_set(&entry->busy, nbusy);
-                list_add_tail(&entry->list, &rmid_limbo_lru);
-        } else {
+        if (entry->busy)
+                rmid_limbo_count++;
+        else
                 list_add_tail(&entry->list, &rmid_free_lru);
-        }
 }
 
 void free_rmid(u32 rmid)
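One ordering detail in add_rmid_to_limbo() above: has_busy_rmid() is evaluated before set_bit() marks the current RMID busy, so the limbo worker is armed exactly on a domain's empty-to-non-empty transition; once armed, cqm_handle_limbo() keeps rescheduling itself while any bit remains set. The same arm-on-first-entry pattern in isolation, with illustrative names (struct tracker, start_poller() and the fields are not kernel symbols):

    /* Arm the poller only on the empty -> non-empty transition of the
     * dirty set; the running poller re-arms itself until it drains. */
    static void mark_dirty(struct tracker *t, unsigned int id)
    {
            if (bitmap_empty(t->dirty, t->nbits))
                    start_poller(t);                /* hypothetical */
            set_bit(id, t->dirty);
    }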
@@ -387,6 +318,50 @@ static void mbm_update(struct rdt_domain *d, int rmid)
         }
 }
 
+/*
+ * Handler to scan the limbo list and move RMIDs whose occupancy is
+ * below threshold_occupancy to the free list.
+ */
+void cqm_handle_limbo(struct work_struct *work)
+{
+        unsigned long delay = msecs_to_jiffies(CQM_LIMBOCHECK_INTERVAL);
+        int cpu = smp_processor_id();
+        struct rdt_resource *r;
+        struct rdt_domain *d;
+
+        mutex_lock(&rdtgroup_mutex);
+
+        r = &rdt_resources_all[RDT_RESOURCE_L3];
+        d = get_domain_from_cpu(cpu, r);
+
+        if (!d) {
+                pr_warn_once("Failure to get domain for limbo worker\n");
+                goto out_unlock;
+        }
+
+        __check_limbo(d, false);
+
+        if (has_busy_rmid(r, d))
+                schedule_delayed_work_on(cpu, &d->cqm_limbo, delay);
+
+out_unlock:
+        mutex_unlock(&rdtgroup_mutex);
+}
+
+void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms)
+{
+        unsigned long delay = msecs_to_jiffies(delay_ms);
+        struct rdt_resource *r;
+        int cpu;
+
+        r = &rdt_resources_all[RDT_RESOURCE_L3];
+
+        cpu = cpumask_any(&dom->cpu_mask);
+        dom->cqm_work_cpu = cpu;
+
+        schedule_delayed_work_on(cpu, &dom->cqm_limbo, delay);
+}
+
 void mbm_handle_overflow(struct work_struct *work)
 {
         unsigned long delay = msecs_to_jiffies(MBM_OVERFLOW_INTERVAL);
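cqm_setup_limbo_handler() pins the delayed work to one CPU of the domain (recorded in cqm_work_cpu), and cqm_handle_limbo() reschedules itself only while has_busy_rmid() still holds, so polling stops on its own once the domain drains. A minimal sketch of this self-rescheduling delayed-work pattern using the standard workqueue API, where my_poll, my_poll_fn() and work_remains() are illustrative names, not from this patch:

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    static void my_poll_fn(struct work_struct *work);
    static DECLARE_DELAYED_WORK(my_poll, my_poll_fn);

    static void my_poll_fn(struct work_struct *work)
    {
            /* ... do one scan pass ... */

            /* Re-arm only while work remains, mirroring the
             * has_busy_rmid() check in cqm_handle_limbo(). */
            if (work_remains())             /* hypothetical predicate */
                    schedule_delayed_work(&my_poll, msecs_to_jiffies(1000));
    }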
@@ -413,6 +388,7 @@ void mbm_handle_overflow(struct work_struct *work)
         }
 
         schedule_delayed_work_on(cpu, &d->mbm_over, delay);
+
 out_unlock:
         mutex_unlock(&rdtgroup_mutex);
 }