@@ -365,8 +365,6 @@ EXPORT_SYMBOL(kunmap_high);
 
 #include <asm/kmap_size.h>
 
-static DEFINE_PER_CPU(int, __kmap_local_idx);
-
 /*
  * With DEBUG_KMAP_LOCAL the stack depth is doubled and every second
  * slot is unused which acts as a guard page
@@ -379,23 +377,21 @@ static DEFINE_PER_CPU(int, __kmap_local_idx);
 
 static inline int kmap_local_idx_push(void)
 {
-        int idx = __this_cpu_add_return(__kmap_local_idx, KM_INCR) - 1;
-
         WARN_ON_ONCE(in_irq() && !irqs_disabled());
-        BUG_ON(idx >= KM_MAX_IDX);
-        return idx;
+        current->kmap_ctrl.idx += KM_INCR;
+        BUG_ON(current->kmap_ctrl.idx >= KM_MAX_IDX);
+        return current->kmap_ctrl.idx - 1;
 }
 
 static inline int kmap_local_idx(void)
 {
-        return __this_cpu_read(__kmap_local_idx) - 1;
+        return current->kmap_ctrl.idx - 1;
 }
 
 static inline void kmap_local_idx_pop(void)
 {
-        int idx = __this_cpu_sub_return(__kmap_local_idx, KM_INCR);
-
-        BUG_ON(idx < 0);
+        current->kmap_ctrl.idx -= KM_INCR;
+        BUG_ON(current->kmap_ctrl.idx < 0);
 }
 
 #ifndef arch_kmap_local_post_map
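The three helpers above now index per-task state instead of the removed per-CPU counter. The task_struct side is not part of this file; below is a minimal sketch of what they assume, with KM_MAX_IDX coming from <asm/kmap_size.h> and everything else being an assumption about the companion sched.h change rather than code from this patch:

/* Assumed per-task bookkeeping; the real definition lives outside this file. */
struct kmap_ctrl {
#ifdef CONFIG_KMAP_LOCAL
        int     idx;                    /* top of the per-task kmap stack, stepped by KM_INCR */
        pte_t   pteval[KM_MAX_IDX];     /* saved PTEs, replayed when the task is scheduled back in */
#endif
};

/* struct task_struct then carries one instance, reachable as current->kmap_ctrl. */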
@@ -464,6 +460,7 @@ void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
         pteval = pfn_pte(pfn, prot);
         set_pte_at(&init_mm, vaddr, kmap_pte - idx, pteval);
         arch_kmap_local_post_map(vaddr, pteval);
+        current->kmap_ctrl.pteval[kmap_local_idx()] = pteval;
         preempt_enable();
 
         return (void *)vaddr;
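With the pteval recorded in current->kmap_ctrl, a mapping created here survives preemption of the caller instead of being tied to the CPU. A minimal caller-side sketch using the kmap_local_page()/kunmap_local() wrappers that sit on top of these indexed helpers; the helper function name below is purely illustrative:

#include <linux/highmem.h>
#include <linux/string.h>

/* Illustrative only: copy a buffer into a (possibly highmem) page. */
static void copy_into_highmem_page(struct page *page, const void *src, size_t len)
{
        void *dst = kmap_local_page(page);      /* pushes an index, records the pteval */

        memcpy(dst, src, len);
        kunmap_local(dst);                      /* clears the pteval, pops the index */
}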
@@ -522,10 +519,92 @@ void kunmap_local_indexed(void *vaddr)
         arch_kmap_local_pre_unmap(addr);
         pte_clear(&init_mm, addr, kmap_pte - idx);
         arch_kmap_local_post_unmap(addr);
+        current->kmap_ctrl.pteval[kmap_local_idx()] = __pte(0);
         kmap_local_idx_pop();
         preempt_enable();
 }
 EXPORT_SYMBOL(kunmap_local_indexed);
+
+/*
+ * Invoked before switch_to(). This is safe even when an interrupt which
+ * needs a kmap_local happens during or after clearing the maps, because
+ * task::kmap_ctrl.idx is not modified by the unmapping code, so a nested
+ * kmap_local will use the next unused index and restore the index on
+ * unmap. The already cleared kmaps of the outgoing task are irrelevant
+ * because the interrupt context does not know about them. The same
+ * applies when scheduling back in, for an interrupt which happens before
+ * the restore is complete.
+ */
+void __kmap_local_sched_out(void)
+{
+        struct task_struct *tsk = current;
+        pte_t *kmap_pte = kmap_get_pte();
+        int i;
+
+        /* Clear kmaps */
+        for (i = 0; i < tsk->kmap_ctrl.idx; i++) {
+                pte_t pteval = tsk->kmap_ctrl.pteval[i];
+                unsigned long addr;
+                int idx;
+
+                /* With debug all even slots are unmapped and act as guard */
+                if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM) && !(i & 0x01)) {
+                        WARN_ON_ONCE(!pte_none(pteval));
+                        continue;
+                }
+                if (WARN_ON_ONCE(pte_none(pteval)))
+                        continue;
+
+                /*
+                 * This is a horrible hack for XTENSA to calculate the
+                 * coloured PTE index. Uses the PFN encoded into the pteval
+                 * and the map index calculation because the actual mapped
+                 * virtual address is not stored in task::kmap_ctrl.
+                 * For any sane architecture this is optimized out.
+                 */
+                idx = arch_kmap_local_map_idx(i, pte_pfn(pteval));
+
+                addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+                arch_kmap_local_pre_unmap(addr);
+                pte_clear(&init_mm, addr, kmap_pte - idx);
+                arch_kmap_local_post_unmap(addr);
+        }
+}
+
+void __kmap_local_sched_in(void)
+{
+        struct task_struct *tsk = current;
+        pte_t *kmap_pte = kmap_get_pte();
+        int i;
+
+        /* Restore kmaps */
+        for (i = 0; i < tsk->kmap_ctrl.idx; i++) {
+                pte_t pteval = tsk->kmap_ctrl.pteval[i];
+                unsigned long addr;
+                int idx;
+
+                /* With debug all even slots are unmapped and act as guard */
+                if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM) && !(i & 0x01)) {
+                        WARN_ON_ONCE(!pte_none(pteval));
+                        continue;
+                }
+                if (WARN_ON_ONCE(pte_none(pteval)))
+                        continue;
+
+                /* See comment in __kmap_local_sched_out() */
+                idx = arch_kmap_local_map_idx(i, pte_pfn(pteval));
+                addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+                set_pte_at(&init_mm, addr, kmap_pte - idx, pteval);
+                arch_kmap_local_post_map(addr, pteval);
+        }
+}
+
+void kmap_local_fork(struct task_struct *tsk)
+{
+        if (WARN_ON_ONCE(tsk->kmap_ctrl.idx))
+                memset(&tsk->kmap_ctrl, 0, sizeof(tsk->kmap_ctrl));
+}
+
 #endif
 
 #if defined(HASHED_PAGE_VIRTUAL)
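
The new __kmap_local_sched_out()/__kmap_local_sched_in() hooks only make sense together with scheduler and fork call sites that this diff does not show. A minimal sketch of the glue, assuming thin wrappers that compile away when CONFIG_KMAP_LOCAL is not set; the wrapper names and call-site placement are assumptions based on the comment before __kmap_local_sched_out(), not part of this patch:

/* Hypothetical wrappers; no-ops when the task holds no local kmaps. */
static inline void kmap_local_sched_out(void)
{
#ifdef CONFIG_KMAP_LOCAL
        if (unlikely(current->kmap_ctrl.idx))
                __kmap_local_sched_out();       /* tear down this task's fixmap PTEs */
#endif
}

static inline void kmap_local_sched_in(void)
{
#ifdef CONFIG_KMAP_LOCAL
        if (unlikely(current->kmap_ctrl.idx))
                __kmap_local_sched_in();        /* replay the saved ptevals */
#endif
}

In this sketch, the outgoing path would be called right before switch_to() (as the comment requires), the incoming path once the new task is current again, and copy_process() would call kmap_local_fork() so a child never starts with a stale, non-zero kmap_ctrl.idx.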