Update Linux from version 6.1.20 to 6.1.21.

main inf-kernel-0.0.0.19
inference 3 months ago
parent 16e190a9fa
commit e7abe588dc
Signed by: inference
SSH Key Fingerprint: SHA256:9Pl0nZ2UJacgm+IeEtLSZ4FOESgP1eKCtRflfPfdX9M

@@ -1221,7 +1221,7 @@ defined:
 	return
 	-ECHILD and it will be called again in ref-walk mode.
 
-``_weak_revalidate``
+``d_weak_revalidate``
 	called when the VFS needs to revalidate a "jumped" dentry. This
 	is called when a path-walk ends at dentry that was not acquired
 	by doing a lookup in the parent directory. This includes "/",

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 6
 PATCHLEVEL = 1
-SUBLEVEL = 20
+SUBLEVEL = 21
 EXTRAVERSION =
 NAME = Hurr durr I'ma ninja sloth

@@ -135,16 +135,17 @@ static int get_timer_irq(void)
 int constant_clockevent_init(void)
 {
-	int irq;
 	unsigned int cpu = smp_processor_id();
 	unsigned long min_delta = 0x600;
 	unsigned long max_delta = (1UL << 48) - 1;
 	struct clock_event_device *cd;
-	static int timer_irq_installed = 0;
+	static int irq = 0, timer_irq_installed = 0;
 
-	irq = get_timer_irq();
-	if (irq < 0)
-		pr_err("Failed to map irq %d (timer)\n", irq);
+	if (!timer_irq_installed) {
+		irq = get_timer_irq();
+		if (irq < 0)
+			pr_err("Failed to map irq %d (timer)\n", irq);
+	}
 
 	cd = &per_cpu(constant_clockevent_device, cpu);

@ -146,19 +146,6 @@ CFLAGS-$(CONFIG_PPC32) += $(call cc-option, $(MULTIPLEWORD))
CFLAGS-$(CONFIG_PPC32) += $(call cc-option,-mno-readonly-in-sdata)
ifdef CONFIG_PPC_BOOK3S_64
ifdef CONFIG_CPU_LITTLE_ENDIAN
CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=power8
else
CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=power4
endif
CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=power10, \
$(call cc-option,-mtune=power9, \
$(call cc-option,-mtune=power8)))
else ifdef CONFIG_PPC_BOOK3E_64
CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=powerpc64
endif
ifdef CONFIG_FUNCTION_TRACER
CC_FLAGS_FTRACE := -pg
ifdef CONFIG_MPROFILE_KERNEL
@ -166,11 +153,12 @@ CC_FLAGS_FTRACE += -mprofile-kernel
endif
endif
CFLAGS-$(CONFIG_TARGET_CPU_BOOL) += $(call cc-option,-mcpu=$(CONFIG_TARGET_CPU))
AFLAGS-$(CONFIG_TARGET_CPU_BOOL) += $(call cc-option,-mcpu=$(CONFIG_TARGET_CPU))
CFLAGS-$(CONFIG_TARGET_CPU_BOOL) += -mcpu=$(CONFIG_TARGET_CPU)
AFLAGS-$(CONFIG_TARGET_CPU_BOOL) += -mcpu=$(CONFIG_TARGET_CPU)
CFLAGS-$(CONFIG_E5500_CPU) += $(call cc-option,-mcpu=e500mc64,-mcpu=powerpc64)
CFLAGS-$(CONFIG_E6500_CPU) += $(call cc-option,-mcpu=e6500,$(E5500_CPU))
CFLAGS-$(CONFIG_POWERPC64_CPU) += $(call cc-option,-mtune=power10, \
$(call cc-option,-mtune=power9, \
$(call cc-option,-mtune=power8)))
asinstr := $(call as-instr,lis 9$(comma)foo@high,-DHAVE_AS_ATHIGH=1)
@ -213,10 +201,7 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
# often slow when they are implemented at all
KBUILD_CFLAGS += $(call cc-option,-mno-string)
cpu-as-$(CONFIG_40x) += -Wa,-m405
cpu-as-$(CONFIG_44x) += -Wa,-m440
cpu-as-$(CONFIG_ALTIVEC) += $(call as-option,-Wa$(comma)-maltivec)
cpu-as-$(CONFIG_PPC_E500) += -Wa,-me500
# When using '-many -mpower4' gas will first try and find a matching power4
# mnemonic and failing that it will allow any valid mnemonic that GAS knows
@ -224,7 +209,6 @@ cpu-as-$(CONFIG_PPC_E500) += -Wa,-me500
# LLVM IAS doesn't understand either flag: https://github.com/ClangBuiltLinux/linux/issues/675
# but LLVM IAS only supports ISA >= 2.06 for Book3S 64 anyway...
cpu-as-$(CONFIG_PPC_BOOK3S_64) += $(call as-option,-Wa$(comma)-mpower4) $(call as-option,-Wa$(comma)-many)
cpu-as-$(CONFIG_PPC_E500MC) += $(call as-option,-Wa$(comma)-me500mc)
KBUILD_AFLAGS += $(cpu-as-y)
KBUILD_CFLAGS += $(cpu-as-y)

@@ -39,13 +39,19 @@ BOOTCFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
 		 $(LINUXINCLUDE)
 
 ifdef CONFIG_PPC64_BOOT_WRAPPER
-ifdef CONFIG_CPU_LITTLE_ENDIAN
-BOOTCFLAGS	+= -m64 -mcpu=powerpc64le
+BOOTCFLAGS	+= -m64
 else
-BOOTCFLAGS	+= -m64 -mcpu=powerpc64
+BOOTCFLAGS	+= -m32
 endif
+
+ifdef CONFIG_TARGET_CPU_BOOL
+BOOTCFLAGS	+= -mcpu=$(CONFIG_TARGET_CPU)
+else ifdef CONFIG_PPC64_BOOT_WRAPPER
+ifdef CONFIG_CPU_LITTLE_ENDIAN
+BOOTCFLAGS	+= -mcpu=powerpc64le
 else
-BOOTCFLAGS	+= -m32 -mcpu=powerpc
+BOOTCFLAGS	+= -mcpu=powerpc64
+endif
 endif
BOOTCFLAGS += -isystem $(shell $(BOOTCC) -print-file-name=include)

@@ -271,11 +271,16 @@ static bool access_error(bool is_write, bool is_exec, struct vm_area_struct *vma
 	}
 
 	/*
-	 * Check for a read fault. This could be caused by a read on an
-	 * inaccessible page (i.e. PROT_NONE), or a Radix MMU execute-only page.
+	 * VM_READ, VM_WRITE and VM_EXEC all imply read permissions, as
+	 * defined in protection_map[]. Read faults can only be caused by
+	 * a PROT_NONE mapping, or with a PROT_EXEC-only mapping on Radix.
 	 */
-	if (unlikely(!(vma->vm_flags & VM_READ)))
+	if (unlikely(!vma_is_accessible(vma)))
 		return true;
+
+	if (unlikely(radix_enabled() && ((vma->vm_flags & VM_ACCESS_FLAGS) == VM_EXEC)))
+		return true;
+
 	/*
 	 * We should ideally do the vma pkey access check here. But in the
 	 * fault path, handle_mm_fault() also does the same check. To avoid
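With the hunk above, a read fault is rejected only for a PROT_NONE mapping or, on Radix, an execute-only mapping, since VM_READ, VM_WRITE and VM_EXEC all imply read permission. A small standalone model of that predicate; the flag values and the helper name are made up for illustration, this is not the kernel code:

#include <stdbool.h>
#include <stdio.h>

#define VM_READ  0x1UL
#define VM_WRITE 0x2UL
#define VM_EXEC  0x4UL
#define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)

/* Returns true when a *read* access to a mapping with these flags must fault. */
static bool read_access_error(unsigned long vm_flags, bool radix)
{
	if (!(vm_flags & VM_ACCESS_FLAGS))	/* PROT_NONE: nothing is allowed */
		return true;
	if (radix && (vm_flags & VM_ACCESS_FLAGS) == VM_EXEC)
		return true;			/* execute-only is possible on Radix */
	return false;				/* R, W or X all imply read */
}

int main(void)
{
	printf("%d\n", read_access_error(0, false));		/* 1 */
	printf("%d\n", read_access_error(VM_EXEC, true));	/* 1 */
	printf("%d\n", read_access_error(VM_WRITE, false));	/* 0 */
	return 0;
}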

@ -118,19 +118,18 @@ endchoice
choice
prompt "CPU selection"
default GENERIC_CPU
help
This will create a kernel which is optimised for a particular CPU.
The resulting kernel may not run on other CPUs, so use this with care.
If unsure, select Generic.
config GENERIC_CPU
config POWERPC64_CPU
bool "Generic (POWER5 and PowerPC 970 and above)"
depends on PPC_BOOK3S_64 && !CPU_LITTLE_ENDIAN
select PPC_64S_HASH_MMU
config GENERIC_CPU
config POWERPC64_CPU
bool "Generic (POWER8 and above)"
depends on PPC_BOOK3S_64 && CPU_LITTLE_ENDIAN
select ARCH_HAS_FAST_MULTIPLIER
@ -143,6 +142,7 @@ config POWERPC_CPU
config CELL_CPU
bool "Cell Broadband Engine"
depends on PPC_BOOK3S_64 && !CPU_LITTLE_ENDIAN
depends on !CC_IS_CLANG
select PPC_64S_HASH_MMU
config PPC_970_CPU
@ -184,10 +184,12 @@ config E5500_CPU
config E6500_CPU
bool "Freescale e6500"
depends on PPC64 && PPC_E500
depends on !CC_IS_CLANG
config 405_CPU
bool "40x family"
depends on 40x
depends on !CC_IS_CLANG
config 440_CPU
bool "440 (44x family)"
@ -196,22 +198,27 @@ config 440_CPU
config 464_CPU
bool "464 (44x family)"
depends on 44x
depends on !CC_IS_CLANG
config 476_CPU
bool "476 (47x family)"
depends on PPC_47x
depends on !CC_IS_CLANG
config 860_CPU
bool "8xx family"
depends on PPC_8xx
depends on !CC_IS_CLANG
config E300C2_CPU
bool "e300c2 (832x)"
depends on PPC_BOOK3S_32
depends on !CC_IS_CLANG
config E300C3_CPU
bool "e300c3 (831x)"
depends on PPC_BOOK3S_32
depends on !CC_IS_CLANG
config G4_CPU
bool "G4 (74xx)"
@ -228,13 +235,12 @@ config E500MC_CPU
config TOOLCHAIN_DEFAULT_CPU
bool "Rely on the toolchain's implicit default CPU"
depends on PPC32
endchoice
config TARGET_CPU_BOOL
bool
default !GENERIC_CPU && !TOOLCHAIN_DEFAULT_CPU
default !TOOLCHAIN_DEFAULT_CPU
config TARGET_CPU
string
@ -246,6 +252,10 @@ config TARGET_CPU
default "power8" if POWER8_CPU
default "power9" if POWER9_CPU
default "power10" if POWER10_CPU
default "e5500" if E5500_CPU
default "e6500" if E6500_CPU
default "power4" if POWERPC64_CPU && !CPU_LITTLE_ENDIAN
default "power8" if POWERPC64_CPU && CPU_LITTLE_ENDIAN
default "405" if 405_CPU
default "440" if 440_CPU
default "464" if 464_CPU

@ -19,8 +19,6 @@ typedef struct {
#ifdef CONFIG_SMP
/* A local icache flush is needed before user execution can resume. */
cpumask_t icache_stale_mask;
/* A local tlb flush is needed before user execution can resume. */
cpumask_t tlb_stale_mask;
#endif
} mm_context_t;

@ -22,24 +22,6 @@ static inline void local_flush_tlb_page(unsigned long addr)
{
ALT_FLUSH_TLB_PAGE(__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory"));
}
static inline void local_flush_tlb_all_asid(unsigned long asid)
{
__asm__ __volatile__ ("sfence.vma x0, %0"
:
: "r" (asid)
: "memory");
}
static inline void local_flush_tlb_page_asid(unsigned long addr,
unsigned long asid)
{
__asm__ __volatile__ ("sfence.vma %0, %1"
:
: "r" (addr), "r" (asid)
: "memory");
}
#else /* CONFIG_MMU */
#define local_flush_tlb_all() do { } while (0)
#define local_flush_tlb_page(addr) do { } while (0)

@ -196,16 +196,6 @@ switch_mm_fast:
if (need_flush_tlb)
local_flush_tlb_all();
#ifdef CONFIG_SMP
else {
cpumask_t *mask = &mm->context.tlb_stale_mask;
if (cpumask_test_cpu(cpu, mask)) {
cpumask_clear_cpu(cpu, mask);
local_flush_tlb_all_asid(cntx & asid_mask);
}
}
#endif
}
static void set_mm_noasid(struct mm_struct *mm)
@@ -215,12 +205,24 @@ static void set_mm_noasid(struct mm_struct *mm)
 	local_flush_tlb_all();
 }
 
-static inline void set_mm(struct mm_struct *mm, unsigned int cpu)
+static inline void set_mm(struct mm_struct *prev,
+			  struct mm_struct *next, unsigned int cpu)
 {
-	if (static_branch_unlikely(&use_asid_allocator))
-		set_mm_asid(mm, cpu);
-	else
-		set_mm_noasid(mm);
+	/*
+	 * The mm_cpumask indicates which harts' TLBs contain the virtual
+	 * address mapping of the mm. Compared to noasid, using asid
+	 * can't guarantee that stale TLB entries are invalidated because
+	 * the asid mechanism wouldn't flush TLB for every switch_mm for
+	 * performance. So when using asid, keep all CPUs footmarks in
+	 * cpumask() until mm reset.
+	 */
+	cpumask_set_cpu(cpu, mm_cpumask(next));
+	if (static_branch_unlikely(&use_asid_allocator)) {
+		set_mm_asid(next, cpu);
+	} else {
+		cpumask_clear_cpu(cpu, mm_cpumask(prev));
+		set_mm_noasid(next);
+	}
 }
static int __init asids_init(void)
@ -274,7 +276,8 @@ static int __init asids_init(void)
}
early_initcall(asids_init);
#else
static inline void set_mm(struct mm_struct *mm, unsigned int cpu)
static inline void set_mm(struct mm_struct *prev,
struct mm_struct *next, unsigned int cpu)
{
/* Nothing to do here when there is no MMU */
}
@ -327,10 +330,7 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
*/
cpu = smp_processor_id();
cpumask_clear_cpu(cpu, mm_cpumask(prev));
cpumask_set_cpu(cpu, mm_cpumask(next));
set_mm(next, cpu);
set_mm(prev, next, cpu);
flush_icache_deferred(next, cpu);
}
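In the reworked switch_mm()/set_mm() path above, the incoming mm's cpumask bit is always set on switch-in, while the previous mm's bit is cleared only in the non-ASID case; with ASIDs the bit stays set until an explicit flush, since stale entries may remain in that hart's TLB. A toy, non-kernel model of that bookkeeping, with a plain 64-bit mask standing in for cpumask_t:

#include <stdint.h>
#include <stdio.h>

struct toy_mm {
	uint64_t cpumask;	/* bit n set: hart n may hold TLB entries for this mm */
};

static void toy_switch_mm(struct toy_mm *prev, struct toy_mm *next,
			  unsigned int cpu, int using_asid)
{
	next->cpumask |= 1ULL << cpu;		/* next can now populate this hart's TLB */
	if (!using_asid)
		prev->cpumask &= ~(1ULL << cpu);/* the switch itself flushed prev here */
	/* with ASIDs, prev keeps its bit until an explicit TLB flush resets it */
}

int main(void)
{
	struct toy_mm a = { 0 }, b = { 0 };

	a.cpumask = 1ULL << 2;			/* mm "a" was running on hart 2 */
	toy_switch_mm(&a, &b, 2, 1);		/* ASID case: a keeps hart 2 marked */
	printf("asid:   a=%llx b=%llx\n",
	       (unsigned long long)a.cpumask, (unsigned long long)b.cpumask);

	toy_switch_mm(&a, &b, 2, 0);		/* no-ASID case: hart 2 leaves a's mask */
	printf("noasid: a=%llx b=%llx\n",
	       (unsigned long long)a.cpumask, (unsigned long long)b.cpumask);
	return 0;
}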

@ -5,7 +5,23 @@
#include <linux/sched.h>
#include <asm/sbi.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
static inline void local_flush_tlb_all_asid(unsigned long asid)
{
__asm__ __volatile__ ("sfence.vma x0, %0"
:
: "r" (asid)
: "memory");
}
static inline void local_flush_tlb_page_asid(unsigned long addr,
unsigned long asid)
{
__asm__ __volatile__ ("sfence.vma %0, %1"
:
: "r" (addr), "r" (asid)
: "memory");
}
void flush_tlb_all(void)
{
@ -15,7 +31,6 @@ void flush_tlb_all(void)
static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
unsigned long size, unsigned long stride)
{
struct cpumask *pmask = &mm->context.tlb_stale_mask;
struct cpumask *cmask = mm_cpumask(mm);
unsigned int cpuid;
bool broadcast;
@ -29,15 +44,6 @@ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
if (static_branch_unlikely(&use_asid_allocator)) {
unsigned long asid = atomic_long_read(&mm->context.id);
/*
* TLB will be immediately flushed on harts concurrently
* executing this MM context. TLB flush on other harts
* is deferred until this MM context migrates there.
*/
cpumask_setall(pmask);
cpumask_clear_cpu(cpuid, pmask);
cpumask_andnot(pmask, pmask, cmask);
if (broadcast) {
sbi_remote_sfence_vma_asid(cmask, start, size, asid);
} else if (size <= stride) {

@ -57,11 +57,19 @@ repeat:
if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_data.start && initrd_data.size &&
intersects(initrd_data.start, initrd_data.size, safe_addr, size))
safe_addr = initrd_data.start + initrd_data.size;
if (intersects(safe_addr, size, (unsigned long)comps, comps->len)) {
safe_addr = (unsigned long)comps + comps->len;
goto repeat;
}
for_each_rb_entry(comp, comps)
if (intersects(safe_addr, size, comp->addr, comp->len)) {
safe_addr = comp->addr + comp->len;
goto repeat;
}
if (intersects(safe_addr, size, (unsigned long)certs, certs->len)) {
safe_addr = (unsigned long)certs + certs->len;
goto repeat;
}
for_each_rb_entry(cert, certs)
if (intersects(safe_addr, size, cert->addr, cert->len)) {
safe_addr = cert->addr + cert->len;

@@ -544,8 +544,7 @@ static struct resource *__alloc_res(struct zpci_dev *zdev, unsigned long start,
 	return r;
 }
 
-int zpci_setup_bus_resources(struct zpci_dev *zdev,
-			     struct list_head *resources)
+int zpci_setup_bus_resources(struct zpci_dev *zdev)
 {
 	unsigned long addr, size, flags;
 	struct resource *res;
@@ -581,7 +580,6 @@ int zpci_setup_bus_resources(struct zpci_dev *zdev,
 			return -ENOMEM;
 		}
 		zdev->bars[i].res = res;
-		pci_add_resource(resources, res);
 	}
 
 	zdev->has_resources = 1;
@@ -590,17 +588,23 @@ int zpci_setup_bus_resources(struct zpci_dev *zdev,
 static void zpci_cleanup_bus_resources(struct zpci_dev *zdev)
 {
+	struct resource *res;
 	int i;
 
+	pci_lock_rescan_remove();
 	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
-		if (!zdev->bars[i].size || !zdev->bars[i].res)
+		res = zdev->bars[i].res;
+		if (!res)
 			continue;
 
+		release_resource(res);
+		pci_bus_remove_resource(zdev->zbus->bus, res);
 		zpci_free_iomap(zdev, zdev->bars[i].map_idx);
-		release_resource(zdev->bars[i].res);
-		kfree(zdev->bars[i].res);
+		zdev->bars[i].res = NULL;
+		kfree(res);
 	}
 	zdev->has_resources = 0;
+	pci_unlock_rescan_remove();
 }
int pcibios_device_add(struct pci_dev *pdev)

@@ -41,9 +41,7 @@ static int zpci_nb_devices;
  */
 static int zpci_bus_prepare_device(struct zpci_dev *zdev)
 {
-	struct resource_entry *window, *n;
-	struct resource *res;
-	int rc;
+	int rc, i;
 
 	if (!zdev_enabled(zdev)) {
 		rc = zpci_enable_device(zdev);
@@ -57,10 +55,10 @@ static int zpci_bus_prepare_device(struct zpci_dev *zdev)
 	}
 
 	if (!zdev->has_resources) {
-		zpci_setup_bus_resources(zdev, &zdev->zbus->resources);
-		resource_list_for_each_entry_safe(window, n, &zdev->zbus->resources) {
-			res = window->res;
-			pci_bus_add_resource(zdev->zbus->bus, res, 0);
+		zpci_setup_bus_resources(zdev);
+		for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+			if (zdev->bars[i].res)
+				pci_bus_add_resource(zdev->zbus->bus, zdev->bars[i].res, 0);
 		}
 	}

@@ -30,8 +30,7 @@ static inline void zpci_zdev_get(struct zpci_dev *zdev)
 int zpci_alloc_domain(int domain);
 void zpci_free_domain(int domain);
 
-int zpci_setup_bus_resources(struct zpci_dev *zdev,
-			     struct list_head *resources);
+int zpci_setup_bus_resources(struct zpci_dev *zdev);
 
 static inline struct zpci_dev *zdev_from_bus(struct pci_bus *bus,
 					     unsigned int devfn)

@ -1,6 +1,12 @@
# SPDX-License-Identifier: GPL-2.0
core-y += arch/x86/crypto/
#
# Disable SSE and other FP/SIMD instructions to match normal x86
#
KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx
KBUILD_RUSTFLAGS += -Ctarget-feature=-sse,-sse2,-sse3,-ssse3,-sse4.1,-sse4.2,-avx,-avx2
ifeq ($(CONFIG_X86_32),y)
START := 0x8048000

@ -128,8 +128,9 @@ struct snp_psc_desc {
struct psc_entry entries[VMGEXIT_PSC_MAX_ENTRY];
} __packed;
/* Guest message request error code */
/* Guest message request error codes */
#define SNP_GUEST_REQ_INVALID_LEN BIT_ULL(32)
#define SNP_GUEST_REQ_ERR_BUSY BIT_ULL(33)
#define GHCB_MSR_TERM_REQ 0x100
#define GHCB_MSR_TERM_REASON_SET_POS 12

@ -256,20 +256,22 @@ enum avic_ipi_failure_cause {
AVIC_IPI_FAILURE_INVALID_BACKING_PAGE,
};
#define AVIC_PHYSICAL_MAX_INDEX_MASK GENMASK_ULL(9, 0)
#define AVIC_PHYSICAL_MAX_INDEX_MASK GENMASK_ULL(8, 0)
/*
* For AVIC, the max index allowed for physical APIC ID
* table is 0xff (255).
* For AVIC, the max index allowed for physical APIC ID table is 0xfe (254), as
* 0xff is a broadcast to all CPUs, i.e. can't be targeted individually.
*/
#define AVIC_MAX_PHYSICAL_ID 0XFEULL
/*
* For x2AVIC, the max index allowed for physical APIC ID
* table is 0x1ff (511).
* For x2AVIC, the max index allowed for physical APIC ID table is 0x1ff (511).
*/
#define X2AVIC_MAX_PHYSICAL_ID 0x1FFUL
static_assert((AVIC_MAX_PHYSICAL_ID & AVIC_PHYSICAL_MAX_INDEX_MASK) == AVIC_MAX_PHYSICAL_ID);
static_assert((X2AVIC_MAX_PHYSICAL_ID & AVIC_PHYSICAL_MAX_INDEX_MASK) == X2AVIC_MAX_PHYSICAL_ID);
#define AVIC_HPA_MASK ~((0xFFFULL << 52) | 0xFFF)
#define VMCB_AVIC_APIC_BAR_MASK 0xFFFFFFFFFF000ULL

@ -2365,6 +2365,7 @@ static void mce_restart(void)
{
mce_timer_delete_all();
on_each_cpu(mce_cpu_restart, NULL, 1);
mce_schedule_work();
}
/* Toggle features for corrected errors */

@ -374,7 +374,6 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
{
struct resctrl_schema *s;
struct rdtgroup *rdtgrp;
struct rdt_domain *dom;
struct rdt_resource *r;
char *tok, *resname;
int ret = 0;
@ -403,10 +402,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
goto out;
}
list_for_each_entry(s, &resctrl_schema_all, list) {
list_for_each_entry(dom, &s->res->domains, list)
memset(dom->staged_config, 0, sizeof(dom->staged_config));
}
rdt_staged_configs_clear();
while ((tok = strsep(&buf, "\n")) != NULL) {
resname = strim(strsep(&tok, ":"));
@ -451,6 +447,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
}
out:
rdt_staged_configs_clear();
rdtgroup_kn_unlock(of->kn);
cpus_read_unlock();
return ret ?: nbytes;

@ -537,5 +537,6 @@ bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d);
void __check_limbo(struct rdt_domain *d, bool force_free);
void rdt_domain_reconfigure_cdp(struct rdt_resource *r);
void __init thread_throttle_mode_init(void);
void rdt_staged_configs_clear(void);
#endif /* _ASM_X86_RESCTRL_INTERNAL_H */

@ -78,6 +78,19 @@ void rdt_last_cmd_printf(const char *fmt, ...)
va_end(ap);
}
void rdt_staged_configs_clear(void)
{
struct rdt_resource *r;
struct rdt_domain *dom;
lockdep_assert_held(&rdtgroup_mutex);
for_each_alloc_capable_rdt_resource(r) {
list_for_each_entry(dom, &r->domains, list)
memset(dom->staged_config, 0, sizeof(dom->staged_config));
}
}
/*
* Trivial allocator for CLOSIDs. Since h/w only supports a small number,
* we can keep a bitmap of free CLOSIDs in a single integer.
@ -2851,7 +2864,9 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
{
struct resctrl_schema *s;
struct rdt_resource *r;
int ret;
int ret = 0;
rdt_staged_configs_clear();
list_for_each_entry(s, &resctrl_schema_all, list) {
r = s->res;
@ -2862,20 +2877,22 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
} else {
ret = rdtgroup_init_cat(s, rdtgrp->closid);
if (ret < 0)
return ret;
goto out;
}
ret = resctrl_arch_update_domains(r, rdtgrp->closid);
if (ret < 0) {
rdt_last_cmd_puts("Failed to initialize allocations\n");
return ret;
goto out;
}
}
rdtgrp->mode = RDT_MODE_SHAREABLE;
return 0;
out:
rdt_staged_configs_clear();
return ret;
}
static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,

@ -134,9 +134,11 @@ SYM_TYPED_FUNC_START(ftrace_stub)
RET
SYM_FUNC_END(ftrace_stub)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
SYM_TYPED_FUNC_START(ftrace_stub_graph)
RET
SYM_FUNC_END(ftrace_stub_graph)
#endif
#ifdef CONFIG_DYNAMIC_FTRACE

@@ -2183,9 +2183,6 @@ int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, unsigned
 	struct ghcb *ghcb;
 	int ret;
 
-	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
-		return -ENODEV;
-
 	if (!fw_err)
 		return -EINVAL;
@@ -2212,15 +2209,26 @@ int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, unsigned
 	if (ret)
 		goto e_put;
 
-	if (ghcb->save.sw_exit_info_2) {
-		/* Number of expected pages are returned in RBX */
-		if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST &&
-		    ghcb->save.sw_exit_info_2 == SNP_GUEST_REQ_INVALID_LEN)
-			input->data_npages = ghcb_get_rbx(ghcb);
+	*fw_err = ghcb->save.sw_exit_info_2;
+	switch (*fw_err) {
+	case 0:
+		break;
 
-		*fw_err = ghcb->save.sw_exit_info_2;
+	case SNP_GUEST_REQ_ERR_BUSY:
+		ret = -EAGAIN;
+		break;
 
+	case SNP_GUEST_REQ_INVALID_LEN:
+		/* Number of expected pages are returned in RBX */
+		if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST) {
+			input->data_npages = ghcb_get_rbx(ghcb);
+			ret = -ENOSPC;
+			break;
+		}
+		fallthrough;
+	default:
 		ret = -EIO;
+		break;
 	}
 
 e_put:
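The switch above maps the GHCB firmware error word onto distinct return codes: 0 on success, -EAGAIN when the hypervisor reports SNP_GUEST_REQ_ERR_BUSY, -ENOSPC (with the required page count in data_npages) for SNP_GUEST_REQ_INVALID_LEN on extended requests, and -EIO otherwise. A standalone sketch of the caller-side retry pattern this enables; the functions and their behaviour are invented stand-ins, not the kernel's sev-guest driver:

#include <errno.h>
#include <stdio.h>

/* Stand-in for the firmware call: pretends to be busy twice, then succeeds. */
static int fake_guest_request(void)
{
	static int calls;

	return (++calls < 3) ? -EAGAIN : 0;
}

/* Retry only the "busy" case; other errors go straight back to the caller. */
static int issue_with_retry(int max_tries)
{
	int ret = -EAGAIN;

	for (int i = 0; i < max_tries && ret == -EAGAIN; i++)
		ret = fake_guest_request();
	return ret;
}

int main(void)
{
	printf("%d\n", issue_with_retry(5));	/* 0: succeeded on the third try */
	return 0;
}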

@@ -27,19 +27,29 @@
 #include "irq.h"
 #include "svm.h"
 
-/* AVIC GATAG is encoded using VM and VCPU IDs */
-#define AVIC_VCPU_ID_BITS		8
-#define AVIC_VCPU_ID_MASK		((1 << AVIC_VCPU_ID_BITS) - 1)
+/*
+ * Encode the arbitrary VM ID and the vCPU's default APIC ID, i.e the vCPU ID,
+ * into the GATag so that KVM can retrieve the correct vCPU from a GALog entry
+ * if an interrupt can't be delivered, e.g. because the vCPU isn't running.
+ *
+ * For the vCPU ID, use however many bits are currently allowed for the max
+ * guest physical APIC ID (limited by the size of the physical ID table), and
+ * use whatever bits remain to assign arbitrary AVIC IDs to VMs. Note, the
+ * size of the GATag is defined by hardware (32 bits), but is an opaque value
+ * as far as hardware is concerned.
+ */
+#define AVIC_VCPU_ID_MASK		AVIC_PHYSICAL_MAX_INDEX_MASK
 
-#define AVIC_VM_ID_BITS			24
-#define AVIC_VM_ID_NR			(1 << AVIC_VM_ID_BITS)
-#define AVIC_VM_ID_MASK			((1 << AVIC_VM_ID_BITS) - 1)
+#define AVIC_VM_ID_SHIFT		HWEIGHT32(AVIC_PHYSICAL_MAX_INDEX_MASK)
+#define AVIC_VM_ID_MASK			(GENMASK(31, AVIC_VM_ID_SHIFT) >> AVIC_VM_ID_SHIFT)
 
-#define AVIC_GATAG(x, y)		(((x & AVIC_VM_ID_MASK) << AVIC_VCPU_ID_BITS) | \
+#define AVIC_GATAG(x, y)		(((x & AVIC_VM_ID_MASK) << AVIC_VM_ID_SHIFT) | \
 						(y & AVIC_VCPU_ID_MASK))
-#define AVIC_GATAG_TO_VMID(x)		((x >> AVIC_VCPU_ID_BITS) & AVIC_VM_ID_MASK)
+#define AVIC_GATAG_TO_VMID(x)		((x >> AVIC_VM_ID_SHIFT) & AVIC_VM_ID_MASK)
 #define AVIC_GATAG_TO_VCPUID(x)		(x & AVIC_VCPU_ID_MASK)
 
+static_assert(AVIC_GATAG(AVIC_VM_ID_MASK, AVIC_VCPU_ID_MASK) == -1u);
+
 static bool force_avic;
 module_param_unsafe(force_avic, bool, 0444);
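With AVIC_PHYSICAL_MAX_INDEX_MASK now 9 bits wide, the GATag gives 9 bits to the vCPU ID and the remaining 23 bits to the VM ID, and the new static_assert checks that the two fields exactly fill the 32-bit tag. A standalone illustration with the mask values written out numerically; the helper function is made up:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define VCPU_ID_MASK	0x1ffu				/* GENMASK(8, 0): 9 bits */
#define VM_ID_SHIFT	9				/* HWEIGHT32(0x1ff) */
#define VM_ID_MASK	(0xffffffffu >> VM_ID_SHIFT)	/* the remaining 23 bits */

static uint32_t make_gatag(uint32_t vm_id, uint32_t vcpu_id)
{
	return ((vm_id & VM_ID_MASK) << VM_ID_SHIFT) | (vcpu_id & VCPU_ID_MASK);
}

int main(void)
{
	uint32_t tag = make_gatag(0x12345, 42);

	assert((tag >> VM_ID_SHIFT) == 0x12345);	/* the VM ID comes back out */
	assert((tag & VCPU_ID_MASK) == 42);		/* so does the vCPU ID */
	/* all-ones VM ID and vCPU ID together fill the 32-bit tag exactly */
	assert(make_gatag(VM_ID_MASK, VCPU_ID_MASK) == UINT32_MAX);
	printf("tag = %#x\n", tag);
	return 0;
}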

@@ -2999,7 +2999,7 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
 					struct vmcs12 *vmcs12,
 					enum vm_entry_failure_code *entry_failure_code)
 {
-	bool ia32e;
+	bool ia32e = !!(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE);
 
 	*entry_failure_code = ENTRY_FAIL_DEFAULT;
@@ -3025,6 +3025,13 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
 					   vmcs12->guest_ia32_perf_global_ctrl)))
 		return -EINVAL;
 
+	if (CC((vmcs12->guest_cr0 & (X86_CR0_PG | X86_CR0_PE)) == X86_CR0_PG))
+		return -EINVAL;
+
+	if (CC(ia32e && !(vmcs12->guest_cr4 & X86_CR4_PAE)) ||
+	    CC(ia32e && !(vmcs12->guest_cr0 & X86_CR0_PG)))
+		return -EINVAL;
+
 	/*
 	 * If the load IA32_EFER VM-entry control is 1, the following checks
 	 * are performed on the field for the IA32_EFER MSR:
@@ -3036,7 +3043,6 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
 	 */
 	if (to_vmx(vcpu)->nested.nested_run_pending &&
 	    (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) {
-		ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0;
 		if (CC(!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer)) ||
 		    CC(ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA)) ||
 		    CC(((vmcs12->guest_cr0 & X86_CR0_PG) &&

@@ -600,7 +600,8 @@ void __init sme_enable(struct boot_params *bp)
 	cmdline_ptr = (const char *)((u64)bp->hdr.cmd_line_ptr |
 				     ((u64)bp->ext_cmd_line_ptr << 32));
 
-	cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer));
+	if (cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer)) < 0)
+		return;
 
 	if (!strncmp(buffer, cmdline_on, sizeof(buffer)))
 		sme_me_mask = me_mask;

@@ -2711,6 +2711,7 @@ static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
 	struct blk_mq_hw_ctx *this_hctx = NULL;
 	struct blk_mq_ctx *this_ctx = NULL;
 	struct request *requeue_list = NULL;
+	struct request **requeue_lastp = &requeue_list;
 	unsigned int depth = 0;
 	LIST_HEAD(list);
 
@@ -2721,10 +2722,10 @@ static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
 			this_hctx = rq->mq_hctx;
 			this_ctx = rq->mq_ctx;
 		} else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx) {
-			rq_list_add(&requeue_list, rq);
+			rq_list_add_tail(&requeue_lastp, rq);
 			continue;
 		}
-		list_add_tail(&rq->queuelist, &list);
+		list_add(&rq->queuelist, &list);
 		depth++;
 	} while (!rq_list_empty(plug->mq_list));
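rq_list_add() pushes at the head of the singly linked requeue list, so switching to rq_list_add_tail() with a cached requeue_lastp keeps requests in submission order without walking the list. A minimal standalone model of that tail-pointer technique, using plain structs rather than the block layer's rq_list helpers:

#include <stdio.h>
#include <stdlib.h>

struct node {
	int val;
	struct node *next;
};

/* Append in O(1): *tailp always points at the list's terminating next field. */
static void add_tail(struct node ***tailp, struct node *n)
{
	n->next = NULL;
	**tailp = n;
	*tailp = &n->next;
}

int main(void)
{
	struct node *head = NULL, **lastp = &head;

	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		if (!n)
			return 1;
		n->val = i;
		add_tail(&lastp, n);
	}
	for (struct node *n = head; n; n = n->next)
		printf("%d ", n->val);	/* 0 1 2: insertion order preserved */
	printf("\n");
	while (head) {
		struct node *n = head;

		head = head->next;
		free(n);
	}
	return 0;
}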

@ -537,16 +537,19 @@ static int topology_get_acpi_cpu_tag(struct acpi_table_header *table,
static struct acpi_table_header *acpi_get_pptt(void)
{
static struct acpi_table_header *pptt;
static bool is_pptt_checked;
acpi_status status;
/*
* PPTT will be used at runtime on every CPU hotplug in path, so we
* don't need to call acpi_put_table() to release the table mapping.
*/
if (!pptt) {
if (!pptt && !is_pptt_checked) {
status = acpi_get_table(ACPI_SIG_PPTT, 0, &pptt);
if (ACPI_FAILURE(status))
acpi_pptt_warn_missing();
is_pptt_checked = true;
}
return pptt;
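The is_pptt_checked flag above caches a negative result as well, so acpi_get_table() is attempted only once even on systems without a PPTT. The same pattern in a standalone toy; the probe function and its failure are invented for illustration:

#include <stdbool.h>
#include <stdio.h>

static int probe_calls;

static int expensive_probe(int **out)
{
	probe_calls++;
	*out = NULL;		/* pretend the table is absent */
	return -1;
}

static int *get_table(void)
{
	static int *cached;
	static bool checked;

	if (!cached && !checked) {
		if (expensive_probe(&cached))
			cached = NULL;
		checked = true;	/* the negative result is cached too */
	}
	return cached;
}

int main(void)
{
	get_table();
	get_table();
	printf("probe ran %d time(s)\n", probe_calls);	/* 1 */
	return 0;
}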

@ -1853,35 +1853,44 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
static void loop_handle_cmd(struct loop_cmd *cmd)
{
struct cgroup_subsys_state *cmd_blkcg_css = cmd->blkcg_css;
struct cgroup_subsys_state *cmd_memcg_css = cmd->memcg_css;
struct request *rq = blk_mq_rq_from_pdu(cmd);
const bool write = op_is_write(req_op(rq));
struct loop_device *lo = rq->q->queuedata;
int ret = 0;
struct mem_cgroup *old_memcg = NULL;
const bool use_aio = cmd->use_aio;
if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
ret = -EIO;
goto failed;
}
if (cmd->blkcg_css)
kthread_associate_blkcg(cmd->blkcg_css);
if (cmd->memcg_css)
if (cmd_blkcg_css)
kthread_associate_blkcg(cmd_blkcg_css);
if (cmd_memcg_css)
old_memcg = set_active_memcg(
mem_cgroup_from_css(cmd->memcg_css));
mem_cgroup_from_css(cmd_memcg_css));
/*
* do_req_filebacked() may call blk_mq_complete_request() synchronously
* or asynchronously if using aio. Hence, do not touch 'cmd' after
* do_req_filebacked() has returned unless we are sure that 'cmd' has
* not yet been completed.
*/
ret = do_req_filebacked(lo, rq);
if (cmd->blkcg_css)
if (cmd_blkcg_css)
kthread_associate_blkcg(NULL);
if (cmd->memcg_css) {
if (cmd_memcg_css) {
set_active_memcg(old_memcg);
css_put(cmd->memcg_css);
css_put(cmd_memcg_css);
}
failed:
/* complete non-aio request */
if (!cmd->use_aio || ret) {
if (!use_aio || ret) {
if (ret == -EOPNOTSUPP)
cmd->ret = ret;
else

@@ -1393,8 +1393,7 @@ static inline void nullb_complete_cmd(struct nullb_cmd *cmd)
 	case NULL_IRQ_SOFTIRQ:
 		switch (cmd->nq->dev->queue_mode) {
 		case NULL_Q_MQ:
-			if (likely(!blk_should_fake_timeout(cmd->rq->q)))
-				blk_mq_complete_request(cmd->rq);
+			blk_mq_complete_request(cmd->rq);
 			break;
 		case NULL_Q_BIO:
 			/*
@@ -1655,7 +1654,8 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
 	cmd->rq = bd->rq;
 	cmd->error = BLK_STS_OK;
 	cmd->nq = nq;
-	cmd->fake_timeout = should_timeout_request(bd->rq);
+	cmd->fake_timeout = should_timeout_request(bd->rq) ||
+			    blk_should_fake_timeout(bd->rq->q);
 
 	blk_mq_start_request(bd->rq);

@ -972,6 +972,8 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
print_version();
hp = mdesc_grab();
if (!hp)
return -ENODEV;
err = -ENODEV;
if ((vdev->dev_no << PARTITION_SHIFT) & ~(u64)MINORMASK) {

@ -92,7 +92,7 @@ config COMMON_CLK_RK808
config COMMON_CLK_HI655X
tristate "Clock driver for Hi655x" if EXPERT
depends on (MFD_HI655X_PMIC || COMPILE_TEST)
depends on REGMAP
select REGMAP
default MFD_HI655X_PMIC
help
This driver supports the hi655x PMIC clock. This

@ -103,7 +103,8 @@ static void psci_pd_remove(void)
struct psci_pd_provider *pd_provider, *it;
struct generic_pm_domain *genpd;
list_for_each_entry_safe(pd_provider, it, &psci_pd_providers, link) {
list_for_each_entry_safe_reverse(pd_provider, it,
&psci_pd_providers, link) {
of_genpd_del_provider(pd_provider->node);
genpd = of_genpd_remove_last(pd_provider->node);

@ -206,7 +206,7 @@ static int do_feature_check_call(const u32 api_id)
}
/* Add new entry if not present */
feature_data = kmalloc(sizeof(*feature_data), GFP_KERNEL);
feature_data = kmalloc(sizeof(*feature_data), GFP_ATOMIC);
if (!feature_data)
return -ENOMEM;

@ -1695,7 +1695,7 @@ static int psp_hdcp_initialize(struct psp_context *psp)
psp->hdcp_context.context.mem_context.shared_mem_size = PSP_HDCP_SHARED_MEM_SIZE;
psp->hdcp_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
if (!psp->hdcp_context.context.initialized) {
if (!psp->hdcp_context.context.mem_context.shared_buf) {
ret = psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context);
if (ret)
return ret;
@ -1762,7 +1762,7 @@ static int psp_dtm_initialize(struct psp_context *psp)
psp->dtm_context.context.mem_context.shared_mem_size = PSP_DTM_SHARED_MEM_SIZE;
psp->dtm_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
if (!psp->dtm_context.context.initialized) {
if (!psp->dtm_context.context.mem_context.shared_buf) {
ret = psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context);
if (ret)
return ret;
@ -1830,7 +1830,7 @@ static int psp_rap_initialize(struct psp_context *psp)
psp->rap_context.context.mem_context.shared_mem_size = PSP_RAP_SHARED_MEM_SIZE;
psp->rap_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
if (!psp->rap_context.context.initialized) {
if (!psp->rap_context.context.mem_context.shared_buf) {
ret = psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context);
if (ret)
return ret;

@ -59,6 +59,7 @@ static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
unsigned int chunk_size);
static void kfd_gtt_sa_fini(struct kfd_dev *kfd);
static int kfd_resume_iommu(struct kfd_dev *kfd);
static int kfd_resume(struct kfd_dev *kfd);
static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
@ -634,7 +635,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
svm_migrate_init(kfd->adev);
if (kgd2kfd_resume_iommu(kfd))
if (kfd_resume_iommu(kfd))
goto device_iommu_error;
if (kfd_resume(kfd))
@ -782,6 +783,14 @@ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
}
int kgd2kfd_resume_iommu(struct kfd_dev *kfd)
{
if (!kfd->init_complete)
return 0;
return kfd_resume_iommu(kfd);
}
static int kfd_resume_iommu(struct kfd_dev *kfd)
{
int err = 0;

@@ -778,16 +778,13 @@ static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events)
 	struct kfd_event_waiter *event_waiters;
 	uint32_t i;
 
-	event_waiters = kmalloc_array(num_events,
-					sizeof(struct kfd_event_waiter),
-					GFP_KERNEL);
+	event_waiters = kcalloc(num_events, sizeof(struct kfd_event_waiter),
+				GFP_KERNEL);
 	if (!event_waiters)
 		return NULL;
 
-	for (i = 0; (event_waiters) && (i < num_events) ; i++) {
+	for (i = 0; i < num_events; i++)
 		init_wait(&event_waiters[i].wait);
-		event_waiters[i].activated = false;
-	}
 
 	return event_waiters;
 }
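kcalloc() returns zeroed memory, so the explicit activated = false initialisation (and the redundant pointer check in the loop condition) could be dropped. The userspace analogue of that trade-off, with an invented struct:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct waiter {
	bool activated;
};

int main(void)
{
	size_t n = 4;
	struct waiter *w = calloc(n, sizeof(*w));	/* zeroed: activated == false */

	if (!w)
		return 1;
	for (size_t i = 0; i < n; i++)
		printf("%d", w[i].activated);
	printf("\n");					/* prints 0000 */
	free(w);
	return 0;
}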

@ -992,8 +992,5 @@ void dcn30_prepare_bandwidth(struct dc *dc,
dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz);
dcn20_prepare_bandwidth(dc, context);
dc_dmub_srv_p_state_delegate(dc,
context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching, context);
}

@ -1883,6 +1883,7 @@ int dcn32_populate_dml_pipes_from_context(
bool subvp_in_use = false;
uint8_t is_pipe_split_expected[MAX_PIPES] = {0};
struct dc_crtc_timing *timing;
bool vsr_odm_support = false;
dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
@ -1900,12 +1901,15 @@ int dcn32_populate_dml_pipes_from_context(
timing = &pipe->stream->timing;
pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal;
vsr_odm_support = (res_ctx->pipe_ctx[i].stream->src.width >= 5120 &&
res_ctx->pipe_ctx[i].stream->src.width > res_ctx->pipe_ctx[i].stream->dst.width);
if (context->stream_count == 1 &&
context->stream_status[0].plane_count == 1 &&
!dc_is_hdmi_signal(res_ctx->pipe_ctx[i].stream->signal) &&
is_h_timing_divisible_by_2(res_ctx->pipe_ctx[i].stream) &&
pipe->stream->timing.pix_clk_100hz * 100 > DCN3_2_VMIN_DISPCLK_HZ &&
dc->debug.enable_single_display_2to1_odm_policy) {
dc->debug.enable_single_display_2to1_odm_policy &&
!vsr_odm_support) { //excluding 2to1 ODM combine on >= 5k vsr
pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1;
}
pipe_cnt++;

@@ -1802,7 +1802,10 @@ static unsigned int CalculateVMAndRowBytes(
 	}
 
 	if (SurfaceTiling == dm_sw_linear) {
-		*dpte_row_height = dml_min(128, 1 << (unsigned int) dml_floor(dml_log2(PTEBufferSizeInRequests * *PixelPTEReqWidth / Pitch), 1));
+		if (PTEBufferSizeInRequests == 0)
+			*dpte_row_height = 1;
+		else
+			*dpte_row_height = dml_min(128, 1 << (unsigned int) dml_floor(dml_log2(PTEBufferSizeInRequests * *PixelPTEReqWidth / Pitch), 1));
 		*dpte_row_width_ub = (dml_ceil(((double) SwathWidth - 1) / *PixelPTEReqWidth, 1) + 1) * *PixelPTEReqWidth;
 		*PixelPTEBytesPerRow = *dpte_row_width_ub / *PixelPTEReqWidth * *PTERequestSize;
 	} else if (ScanDirection != dm_vert) {
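The new branch guards the degenerate PTEBufferSizeInRequests == 0 case, where dml_log2(0) would be -inf and the following shift amount would be garbage. A standalone sketch of the same guard using libm; the parameter values and the extra clamp on a negative exponent are illustrative, and it needs -lm to link:

#include <math.h>
#include <stdio.h>

static unsigned int dpte_row_height(double pte_buf_requests, double req_width,
				    double pitch)
{
	double e;

	if (pte_buf_requests == 0)
		return 1;			/* the guarded degenerate case */
	e = floor(log2(pte_buf_requests * req_width / pitch));
	if (e < 0)
		e = 0;				/* keep the shift amount sane */
	unsigned int h = 1u << (unsigned int)e;
	return h < 128 ? h : 128;		/* same 128 cap as the line above */
}

int main(void)
{
	printf("%u %u\n", dpte_row_height(0, 64, 256), dpte_row_height(32, 64, 256));
	/* prints: 1 8 */
	return 0;
}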

@ -27,7 +27,7 @@
// *** IMPORTANT ***
// SMU TEAM: Always increment the interface version if
// any structure is changed in this file
#define PMFW_DRIVER_IF_VERSION 7
#define PMFW_DRIVER_IF_VERSION 8
typedef struct {
int32_t value;
@ -198,7 +198,7 @@ typedef struct {
uint16_t SkinTemp;
uint16_t DeviceState;
uint16_t CurTemp; //[centi-Celsius]
uint16_t spare2;
uint16_t FilterAlphaValue;
uint16_t AverageGfxclkFrequency;
uint16_t AverageFclkFrequency;

@ -29,7 +29,7 @@
#define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04
#define SMU13_DRIVER_IF_VERSION_ALDE 0x08
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_0 0x37
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x07
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x08
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_10 0x32
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x37

@@ -2143,16 +2143,9 @@ static int sienna_cichlid_set_default_od_settings(struct smu_context *smu)
 		(OverDriveTable_t *)smu->smu_table.boot_overdrive_table;
 	OverDriveTable_t *user_od_table =
 		(OverDriveTable_t *)smu->smu_table.user_overdrive_table;
+	OverDriveTable_t user_od_table_bak;
 	int ret = 0;
 
-	/*
-	 * For S3/S4/Runpm resume, no need to setup those overdrive tables again as
-	 *   - either they already have the default OD settings got during cold bootup
-	 *   - or they have some user customized OD settings which cannot be overwritten
-	 */
-	if (smu->adev->in_suspend)
-		return 0;
-
 	ret = smu_cmn_update_table(smu, SMU_TABLE_OVERDRIVE,
 				   0, (void *)boot_od_table, false);
 	if (ret) {
@@ -2163,7 +2156,23 @@ static int sienna_cichlid_set_default_od_settings(struct smu_context *smu)
 	sienna_cichlid_dump_od_table(smu, boot_od_table);
 
 	memcpy(od_table, boot_od_table, sizeof(OverDriveTable_t));
-	memcpy(user_od_table, boot_od_table, sizeof(OverDriveTable_t));
+
+	/*
+	 * For S3/S4/Runpm resume, we need to setup those overdrive tables again,
+	 * but we have to preserve user defined values in "user_od_table".
+	 */
+	if (!smu->adev->in_suspend) {
+		memcpy(user_od_table, boot_od_table, sizeof(OverDriveTable_t));
+		smu->user_dpm_profile.user_od = false;
+	} else if (smu->user_dpm_profile.user_od) {
+		memcpy(&user_od_table_bak, user_od_table, sizeof(OverDriveTable_t));
+		memcpy(user_od_table, boot_od_table, sizeof(OverDriveTable_t));
+		user_od_table->GfxclkFmin = user_od_table_bak.GfxclkFmin;
+		user_od_table->GfxclkFmax = user_od_table_bak.GfxclkFmax;
+		user_od_table->UclkFmin = user_od_table_bak.UclkFmin;
+		user_od_table->UclkFmax = user_od_table_bak.UclkFmax;
+		user_od_table->VddGfxOffset = user_od_table_bak.VddGfxOffset;