|
| 1 | +// SPDX-License-Identifier: GPL-2.0-only |
| 2 | +/* |
| 3 | + * Copyright (C) 2020 Western Digital Corporation or its affiliates. |
| 4 | + * Adapted from arch/arm64/kernel/efi.c |
| 5 | + */ |
| 6 | + |
| 7 | +#include<linux/efi.h> |
| 8 | +#include<linux/init.h> |
| 9 | + |
| 10 | +#include<asm/efi.h> |
| 11 | +#include<asm/pgtable.h> |
| 12 | +#include<asm/pgtable-bits.h> |
| 13 | + |
| 14 | +/* |
| 15 | + * Only regions of type EFI_RUNTIME_SERVICES_CODE need to be |
| 16 | + * executable, everything else can be mapped with the XN bits |
| 17 | + * set. Also take the new (optional) RO/XP bits into account. |
| 18 | + */ |
| 19 | +static__initpgprot_tefimem_to_pgprot_map(efi_memory_desc_t*md) |
| 20 | +{ |
| 21 | +u64attr=md->attribute; |
| 22 | +u32type=md->type; |
| 23 | + |
| 24 | +if (type==EFI_MEMORY_MAPPED_IO) |
| 25 | +returnPAGE_KERNEL; |
| 26 | + |
| 27 | +/* R-- */ |
| 28 | +if ((attr& (EFI_MEMORY_XP |EFI_MEMORY_RO))== |
| 29 | + (EFI_MEMORY_XP |EFI_MEMORY_RO)) |
| 30 | +returnPAGE_KERNEL_READ; |
| 31 | + |
| 32 | +/* R-X */ |
| 33 | +if (attr&EFI_MEMORY_RO) |
| 34 | +returnPAGE_KERNEL_READ_EXEC; |
| 35 | + |
| 36 | +/* RW- */ |
| 37 | +if (((attr& (EFI_MEMORY_RP |EFI_MEMORY_WP |EFI_MEMORY_XP))== |
| 38 | +EFI_MEMORY_XP)|| |
| 39 | +type!=EFI_RUNTIME_SERVICES_CODE) |
| 40 | +returnPAGE_KERNEL; |
| 41 | + |
| 42 | +/* RWX */ |
| 43 | +returnPAGE_KERNEL_EXEC; |
| 44 | +} |
| 45 | + |
| 46 | +int__initefi_create_mapping(structmm_struct*mm,efi_memory_desc_t*md) |
| 47 | +{ |
| 48 | +pgprot_tprot=__pgprot(pgprot_val(efimem_to_pgprot_map(md))& |
| 49 | +~(_PAGE_GLOBAL)); |
| 50 | +inti; |
| 51 | + |
| 52 | +/* RISC-V maps one page at a time */ |
| 53 | +for (i=0;i<md->num_pages;i++) |
| 54 | +create_pgd_mapping(mm->pgd,md->virt_addr+i*PAGE_SIZE, |
| 55 | +md->phys_addr+i*PAGE_SIZE, |
| 56 | +PAGE_SIZE,prot); |
| 57 | +return0; |
| 58 | +} |
| 59 | + |
| 60 | +staticint__initset_permissions(pte_t*ptep,unsigned longaddr,void*data) |
| 61 | +{ |
| 62 | +efi_memory_desc_t*md=data; |
| 63 | +pte_tpte=READ_ONCE(*ptep); |
| 64 | +unsigned longval; |
| 65 | + |
| 66 | +if (md->attribute&EFI_MEMORY_RO) { |
| 67 | +val=pte_val(pte)& ~_PAGE_WRITE; |
| 68 | +val=pte_val(pte) |_PAGE_READ; |
| 69 | +pte=__pte(val); |
| 70 | +} |
| 71 | +if (md->attribute&EFI_MEMORY_XP) { |
| 72 | +val=pte_val(pte)& ~_PAGE_EXEC; |
| 73 | +pte=__pte(val); |
| 74 | +} |
| 75 | +set_pte(ptep,pte); |
| 76 | + |
| 77 | +return0; |
| 78 | +} |
| 79 | + |
/*
 * Tighten the permissions of an EFI runtime region previously mapped by
 * efi_create_mapping(), walking it PTE by PTE with set_permissions().
 * Returns the result of apply_to_page_range() (0 on success).
 */
int __init efi_set_mapping_permissions(struct mm_struct *mm,
				       efi_memory_desc_t *md)
{
	/* Only runtime services code/data regions may be re-permissioned. */
	BUG_ON(md->type != EFI_RUNTIME_SERVICES_CODE &&
	       md->type != EFI_RUNTIME_SERVICES_DATA);

	/*
	 * Calling apply_to_page_range() is only safe on regions that are
	 * guaranteed to be mapped down to pages. Since we are only called
	 * for regions that have been mapped using efi_create_mapping() above
	 * (and this is checked by the generic Memory Attributes table parsing
	 * routines), there is no need to check that again here.
	 */
	return apply_to_page_range(mm, md->virt_addr,
				   md->num_pages << EFI_PAGE_SHIFT,
				   set_permissions, md);
| 96 | +} |