Update Linux from version 6.1.13 to 6.1.14.

Branches/tags: main, inf-kernel-0.0.0.13
Commit: 350f635a36 (parent: fb25ec9d54)
Author: inference
Signed by: inference
SSH Key Fingerprint: SHA256:9Pl0nZ2UJacgm+IeEtLSZ4FOESgP1eKCtRflfPfdX9M

@@ -15,10 +15,10 @@ HiSilicon PCIe PMU driver
The PCIe PMU driver registers a perf PMU with the name of its sicl-id and PCIe
Core id.::
/sys/bus/event_source/hisi_pcie<sicl>_<core>
/sys/bus/event_source/hisi_pcie<sicl>_core<core>
PMU driver provides description of available events and filter options in sysfs,
see /sys/bus/event_source/devices/hisi_pcie<sicl>_<core>.
see /sys/bus/event_source/devices/hisi_pcie<sicl>_core<core>.
The "format" directory describes all formats of the config (events) and config1
(filter options) fields of the perf_event_attr structure. The "events" directory
@@ -33,13 +33,13 @@ monitored by PMU.
Example usage of perf::
$# perf list
hisi_pcie0_0/rx_mwr_latency/ [kernel PMU event]
hisi_pcie0_0/rx_mwr_cnt/ [kernel PMU event]
hisi_pcie0_core0/rx_mwr_latency/ [kernel PMU event]
hisi_pcie0_core0/rx_mwr_cnt/ [kernel PMU event]
------------------------------------------
$# perf stat -e hisi_pcie0_0/rx_mwr_latency/
$# perf stat -e hisi_pcie0_0/rx_mwr_cnt/
$# perf stat -g -e hisi_pcie0_0/rx_mwr_latency/ -e hisi_pcie0_0/rx_mwr_cnt/
$# perf stat -e hisi_pcie0_core0/rx_mwr_latency/
$# perf stat -e hisi_pcie0_core0/rx_mwr_cnt/
$# perf stat -g -e hisi_pcie0_core0/rx_mwr_latency/ -e hisi_pcie0_core0/rx_mwr_cnt/
The current driver does not support sampling. So "perf record" is unsupported.
Also attach to a task is unsupported for PCIe PMU.
@@ -64,7 +64,7 @@ bit8 is set, port=0x100; if these two Root Ports are both monitored, port=0x101.
Example usage of perf::
$# perf stat -e hisi_pcie0_0/rx_mwr_latency,port=0x1/ sleep 5
$# perf stat -e hisi_pcie0_core0/rx_mwr_latency,port=0x1/ sleep 5
-bdf
@@ -76,7 +76,7 @@ For example, "bdf=0x3900" means BDF of target Endpoint is 0000:39:00.0.
Example usage of perf::
$# perf stat -e hisi_pcie0_0/rx_mrd_flux,bdf=0x3900/ sleep 5
$# perf stat -e hisi_pcie0_core0/rx_mrd_flux,bdf=0x3900/ sleep 5
2. Trigger filter
Event statistics start when the first time TLP length is greater/smaller
@@ -90,7 +90,7 @@ means start when TLP length < condition.
Example usage of perf::
$# perf stat -e hisi_pcie0_0/rx_mrd_flux,trig_len=0x4,trig_mode=1/ sleep 5
$# perf stat -e hisi_pcie0_core0/rx_mrd_flux,trig_len=0x4,trig_mode=1/ sleep 5
3. Threshold filter
Counter counts when TLP length within the specified range. You can set the
@@ -103,4 +103,4 @@ when TLP length < threshold.
Example usage of perf::
$# perf stat -e hisi_pcie0_0/rx_mrd_flux,thr_len=0x4,thr_mode=1/ sleep 5
$# perf stat -e hisi_pcie0_core0/rx_mrd_flux,thr_len=0x4,thr_mode=1/ sleep 5
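The event and filter encodings above ultimately land in the "config" and
"config1" fields of struct perf_event_attr. A minimal user-space sketch of
opening such a counter directly, assuming the rx_mrd_flux value and the bdf
bit position have been looked up in the PMU's sysfs "format" directory (the
0x0004 and the shift by 32 below are illustrative placeholders, not the
driver's real bit layout)::

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            struct perf_event_attr attr;
            int type, fd;
            FILE *f;

            /* The dynamic PMU type id is exported by the driver in sysfs. */
            f = fopen("/sys/bus/event_source/devices/hisi_pcie0_core0/type", "r");
            if (!f)
                    return 1;
            if (fscanf(f, "%d", &type) != 1) {
                    fclose(f);
                    return 1;
            }
            fclose(f);

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = type;
            attr.config = 0x0004;            /* hypothetical rx_mrd_flux encoding */
            attr.config1 = 0x3900ULL << 32;  /* hypothetical bdf filter placement */

            /* Counting mode, system-wide on one CPU: pid = -1, cpu = 0.
             * Task attach and sampling are unsupported by this PMU. */
            fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
            if (fd < 0) {
                    perror("perf_event_open");
                    return 1;
            }
            close(fd);
            return 0;
    }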

@@ -3444,7 +3444,7 @@ F: drivers/net/ieee802154/atusb.h
AUDIT SUBSYSTEM
M: Paul Moore <paul@paul-moore.com>
M: Eric Paris <eparis@redhat.com>
L: linux-audit@redhat.com (moderated for non-subscribers)
L: audit@vger.kernel.org
S: Supported
W: https://github.com/linux-audit
T: git git://git.kernel.org/pub/scm/linux/kernel/git/pcmoore/audit.git

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 1
SUBLEVEL = 13
SUBLEVEL = 14
EXTRAVERSION =
NAME = Hurr durr I'ma ninja sloth

@@ -0,0 +1,44 @@
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
* QorIQ FMan v3 10g port #2 device tree stub [ controller @ offset 0x400000 ]
*
* Copyright 2022 Sean Anderson <sean.anderson@seco.com>
* Copyright 2012 - 2015 Freescale Semiconductor Inc.
*/
fman@400000 {
fman0_rx_0x08: port@88000 {
cell-index = <0x8>;
compatible = "fsl,fman-v3-port-rx";
reg = <0x88000 0x1000>;
fsl,fman-10g-port;
};
fman0_tx_0x28: port@a8000 {
cell-index = <0x28>;
compatible = "fsl,fman-v3-port-tx";
reg = <0xa8000 0x1000>;
fsl,fman-10g-port;
};
ethernet@e0000 {
cell-index = <0>;
compatible = "fsl,fman-memac";
reg = <0xe0000 0x1000>;
fsl,fman-ports = <&fman0_rx_0x08 &fman0_tx_0x28>;
ptp-timer = <&ptp_timer0>;
pcsphy-handle = <&pcsphy0>;
};
mdio@e1000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xe1000 0x1000>;
fsl,erratum-a011043; /* must ignore read errors */
pcsphy0: ethernet-phy@0 {
reg = <0x0>;
};
};
};

@@ -0,0 +1,44 @@
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
* QorIQ FMan v3 10g port #3 device tree stub [ controller @ offset 0x400000 ]
*
* Copyright 2022 Sean Anderson <sean.anderson@seco.com>
* Copyright 2012 - 2015 Freescale Semiconductor Inc.
*/
fman@400000 {
fman0_rx_0x09: port@89000 {
cell-index = <0x9>;
compatible = "fsl,fman-v3-port-rx";
reg = <0x89000 0x1000>;
fsl,fman-10g-port;
};
fman0_tx_0x29: port@a9000 {
cell-index = <0x29>;
compatible = "fsl,fman-v3-port-tx";
reg = <0xa9000 0x1000>;
fsl,fman-10g-port;
};
ethernet@e2000 {
cell-index = <1>;
compatible = "fsl,fman-memac";
reg = <0xe2000 0x1000>;
fsl,fman-ports = <&fman0_rx_0x09 &fman0_tx_0x29>;
ptp-timer = <&ptp_timer0>;
pcsphy-handle = <&pcsphy1>;
};
mdio@e3000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xe3000 0x1000>;
fsl,erratum-a011043; /* must ignore read errors */
pcsphy1: ethernet-phy@0 {
reg = <0x0>;
};
};
};

@@ -609,8 +609,8 @@
/include/ "qoriq-bman1.dtsi"
/include/ "qoriq-fman3-0.dtsi"
/include/ "qoriq-fman3-0-1g-0.dtsi"
/include/ "qoriq-fman3-0-1g-1.dtsi"
/include/ "qoriq-fman3-0-10g-2.dtsi"
/include/ "qoriq-fman3-0-10g-3.dtsi"
/include/ "qoriq-fman3-0-1g-2.dtsi"
/include/ "qoriq-fman3-0-1g-3.dtsi"
/include/ "qoriq-fman3-0-1g-4.dtsi"
@@ -659,3 +659,19 @@
interrupts = <16 2 1 9>;
};
};
&fman0_rx_0x08 {
/delete-property/ fsl,fman-10g-port;
};
&fman0_tx_0x28 {
/delete-property/ fsl,fman-10g-port;
};
&fman0_rx_0x09 {
/delete-property/ fsl,fman-10g-port;
};
&fman0_tx_0x29 {
/delete-property/ fsl,fman-10g-port;
};

@@ -8,6 +8,7 @@
#define BSS_FIRST_SECTIONS *(.bss.prominit)
#define EMITS_PT_NOTE
#define RO_EXCEPTION_TABLE_ALIGN 0
#define RUNTIME_DISCARD_EXIT
#define SOFT_MASK_TABLE(align) \
. = ALIGN(align); \
@@ -410,9 +411,12 @@ SECTIONS
DISCARDS
/DISCARD/ : {
*(*.EMB.apuinfo)
*(.glink .iplt .plt .rela* .comment)
*(.glink .iplt .plt .comment)
*(.gnu.version*)
*(.gnu.attributes)
*(.eh_frame)
#ifndef CONFIG_RELOCATABLE
*(.rela*)
#endif
}
}

@@ -234,6 +234,14 @@ void radix__mark_rodata_ro(void)
end = (unsigned long)__end_rodata;
radix__change_memory_range(start, end, _PAGE_WRITE);
for (start = PAGE_OFFSET; start < (unsigned long)_stext; start += PAGE_SIZE) {
end = start + PAGE_SIZE;
if (overlaps_interrupt_vector_text(start, end))
radix__change_memory_range(start, end, _PAGE_WRITE);
else
break;
}
}
void radix__mark_initmem_nx(void)
@@ -268,6 +276,11 @@ static unsigned long next_boundary(unsigned long addr, unsigned long end)
// Relocatable kernel running at non-zero real address
if (stext_phys != 0) {
// The end of interrupts code at zero is a rodata boundary
unsigned long end_intr = __pa_symbol(__end_interrupts) - stext_phys;
if (addr < end_intr)
return end_intr;
// Start of relocated kernel text is a rodata boundary
if (addr < stext_phys)
return stext_phys;

@@ -17,6 +17,8 @@
/* Handle ro_after_init data on our own. */
#define RO_AFTER_INIT_DATA
#define RUNTIME_DISCARD_EXIT
#define EMITS_PT_NOTE
#include <asm-generic/vmlinux.lds.h>

@@ -4,6 +4,7 @@
* Written by Niibe Yutaka and Paul Mundt
*/
OUTPUT_ARCH(sh)
#define RUNTIME_DISCARD_EXIT
#include <asm/thread_info.h>
#include <asm/cache.h>
#include <asm/vmlinux.lds.h>

@@ -183,6 +183,37 @@ void int3_emulate_ret(struct pt_regs *regs)
unsigned long ip = int3_emulate_pop(regs);
int3_emulate_jmp(regs, ip);
}
static __always_inline
void int3_emulate_jcc(struct pt_regs *regs, u8 cc, unsigned long ip, unsigned long disp)
{
static const unsigned long jcc_mask[6] = {
[0] = X86_EFLAGS_OF,
[1] = X86_EFLAGS_CF,
[2] = X86_EFLAGS_ZF,
[3] = X86_EFLAGS_CF | X86_EFLAGS_ZF,
[4] = X86_EFLAGS_SF,
[5] = X86_EFLAGS_PF,
};
bool invert = cc & 1;
bool match;
if (cc < 0xc) {
match = regs->flags & jcc_mask[cc >> 1];
} else {
match = ((regs->flags & X86_EFLAGS_SF) >> X86_EFLAGS_SF_BIT) ^
((regs->flags & X86_EFLAGS_OF) >> X86_EFLAGS_OF_BIT);
if (cc >= 0xe)
match = match || (regs->flags & X86_EFLAGS_ZF);
}
if ((match && !invert) || (!match && invert))
ip += disp;
int3_emulate_jmp(regs, ip);
}
#endif /* !CONFIG_UML_X86 */
#endif /* _ASM_X86_TEXT_PATCHING_H */
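The cc nibble here follows the x86 condition-code encoding: even/odd values
test one predicate and its inverse, and values of 0xc and above derive the
signed comparisons from SF ^ OF (with ZF folded in for cc >= 0xe). A
stand-alone user-space replica of the logic above, as a quick sanity check
(the EFLAGS constants mirror the arch/x86 definitions):

    #include <stdbool.h>
    #include <stdio.h>

    #define X86_EFLAGS_CF     (1UL << 0)
    #define X86_EFLAGS_PF     (1UL << 2)
    #define X86_EFLAGS_ZF     (1UL << 6)
    #define X86_EFLAGS_SF     (1UL << 7)
    #define X86_EFLAGS_OF     (1UL << 11)
    #define X86_EFLAGS_SF_BIT 7
    #define X86_EFLAGS_OF_BIT 11

    /* Same decision logic as int3_emulate_jcc(), minus the register state. */
    static bool jcc_taken(unsigned long flags, unsigned char cc)
    {
            static const unsigned long jcc_mask[6] = {
                    X86_EFLAGS_OF, X86_EFLAGS_CF, X86_EFLAGS_ZF,
                    X86_EFLAGS_CF | X86_EFLAGS_ZF, X86_EFLAGS_SF, X86_EFLAGS_PF,
            };
            bool invert = cc & 1;
            bool match;

            if (cc < 0xc) {
                    match = flags & jcc_mask[cc >> 1];
            } else {
                    match = ((flags & X86_EFLAGS_SF) >> X86_EFLAGS_SF_BIT) ^
                            ((flags & X86_EFLAGS_OF) >> X86_EFLAGS_OF_BIT);
                    if (cc >= 0xe)
                            match = match || (flags & X86_EFLAGS_ZF);
            }
            return (match && !invert) || (!match && invert);
    }

    int main(void)
    {
            printf("JZ  with ZF set: %d\n", jcc_taken(X86_EFLAGS_ZF, 0x4)); /* 1 */
            printf("JNZ with ZF set: %d\n", jcc_taken(X86_EFLAGS_ZF, 0x5)); /* 0 */
            printf("JL  with SF!=OF: %d\n", jcc_taken(X86_EFLAGS_SF, 0xc)); /* 1 */
            return 0;
    }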

@@ -339,6 +339,12 @@ next:
}
}
static inline bool is_jcc32(struct insn *insn)
{
/* Jcc.d32 second opcode byte is in the range: 0x80-0x8f */
return insn->opcode.bytes[0] == 0x0f && (insn->opcode.bytes[1] & 0xf0) == 0x80;
}
#if defined(CONFIG_RETPOLINE) && defined(CONFIG_OBJTOOL)
/*
@@ -427,8 +433,7 @@ static int patch_retpoline(void *addr, struct insn *insn, u8 *bytes)
* [ NOP ]
* 1:
*/
/* Jcc.d32 second opcode byte is in the range: 0x80-0x8f */
if (op == 0x0f && (insn->opcode.bytes[1] & 0xf0) == 0x80) {
if (is_jcc32(insn)) {
cc = insn->opcode.bytes[1] & 0xf;
cc ^= 1; /* invert condition */
@@ -1311,6 +1316,11 @@ void text_poke_sync(void)
on_each_cpu(do_sync_core, NULL, 1);
}
/*
* NOTE: crazy scheme to allow patching Jcc.d32 but not increase the size of
* this thing. When len == 6 everything is prefixed with 0x0f and we map
* opcode to Jcc.d8, using len to distinguish.
*/
struct text_poke_loc {
/* addr := _stext + rel_addr */
s32 rel_addr;
@@ -1432,6 +1442,10 @@ noinstr int poke_int3_handler(struct pt_regs *regs)
int3_emulate_jmp(regs, (long)ip + tp->disp);
break;
case 0x70 ... 0x7f: /* Jcc */
int3_emulate_jcc(regs, tp->opcode & 0xf, (long)ip, tp->disp);
break;
default:
BUG();
}
@@ -1505,16 +1519,26 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries
* Second step: update all but the first byte of the patched range.
*/
for (do_sync = 0, i = 0; i < nr_entries; i++) {
u8 old[POKE_MAX_OPCODE_SIZE] = { tp[i].old, };
u8 old[POKE_MAX_OPCODE_SIZE+1] = { tp[i].old, };
u8 _new[POKE_MAX_OPCODE_SIZE+1];
const u8 *new = tp[i].text;
int len = tp[i].len;
if (len - INT3_INSN_SIZE > 0) {
memcpy(old + INT3_INSN_SIZE,
text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
len - INT3_INSN_SIZE);
if (len == 6) {
_new[0] = 0x0f;
memcpy(_new + 1, new, 5);
new = _new;
}
text_poke(text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
(const char *)tp[i].text + INT3_INSN_SIZE,
new + INT3_INSN_SIZE,
len - INT3_INSN_SIZE);
do_sync++;
}
@@ -1542,8 +1566,7 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries
* The old instruction is recorded so that the event can be
* processed forwards or backwards.
*/
perf_event_text_poke(text_poke_addr(&tp[i]), old, len,
tp[i].text, len);
perf_event_text_poke(text_poke_addr(&tp[i]), old, len, new, len);
}
if (do_sync) {
@@ -1560,10 +1583,15 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries
* replacing opcode.
*/
for (do_sync = 0, i = 0; i < nr_entries; i++) {
if (tp[i].text[0] == INT3_INSN_OPCODE)
u8 byte = tp[i].text[0];
if (tp[i].len == 6)
byte = 0x0f;
if (byte == INT3_INSN_OPCODE)
continue;
text_poke(text_poke_addr(&tp[i]), tp[i].text, INT3_INSN_SIZE);
text_poke(text_poke_addr(&tp[i]), &byte, INT3_INSN_SIZE);
do_sync++;
}
@@ -1581,9 +1609,11 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
const void *opcode, size_t len, const void *emulate)
{
struct insn insn;
int ret, i;
int ret, i = 0;
memcpy((void *)tp->text, opcode, len);
if (len == 6)
i = 1;
memcpy((void *)tp->text, opcode+i, len-i);
if (!emulate)
emulate = opcode;
@@ -1594,6 +1624,13 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
tp->len = len;
tp->opcode = insn.opcode.bytes[0];
if (is_jcc32(&insn)) {
/*
* Map Jcc.d32 onto Jcc.d8 and use len to distinguish.
*/
tp->opcode = insn.opcode.bytes[1] - 0x10;
}
switch (tp->opcode) {
case RET_INSN_OPCODE:
case JMP32_INSN_OPCODE:
@@ -1610,7 +1647,6 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
BUG_ON(len != insn.length);
};
switch (tp->opcode) {
case INT3_INSN_OPCODE:
case RET_INSN_OPCODE:
@@ -1619,6 +1655,7 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
case CALL_INSN_OPCODE:
case JMP32_INSN_OPCODE:
case JMP8_INSN_OPCODE:
case 0x70 ... 0x7f: /* Jcc */
tp->disp = insn.immediate.value;
break;
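Concretely, the NOTE above folds the two-byte Jcc.d32 opcode into one byte:
the stored opcode becomes the equivalent Jcc.d8 (second opcode byte minus
0x10) while len stays 6, which is what poke_int3_handler keys on. A small
stand-alone illustration of the mapping (the displacement is made up):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* JE.d32 at a patch site: 0f 84 <imm32>, 6 bytes in total. */
            uint8_t insn[6] = { 0x0f, 0x84, 0x10, 0x00, 0x00, 0x00 };

            uint8_t opcode = insn[1] - 0x10; /* 0x84 -> 0x74, i.e. JE.d8    */
            uint8_t cc = opcode & 0xf;       /* 0x4: "taken" when ZF is set */

            /* poke_int3_handler() sees len == 6, hits "case 0x70 ... 0x7f"
             * and calls int3_emulate_jcc(regs, cc, ip, disp). */
            printf("stored opcode %#x, cc %#x, len 6\n", opcode, cc);
            return 0;
    }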

@@ -471,50 +471,26 @@ static void kprobe_emulate_call(struct kprobe *p, struct pt_regs *regs)
}
NOKPROBE_SYMBOL(kprobe_emulate_call);
static nokprobe_inline
void __kprobe_emulate_jmp(struct kprobe *p, struct pt_regs *regs, bool cond)
static void kprobe_emulate_jmp(struct kprobe *p, struct pt_regs *regs)
{
unsigned long ip = regs->ip - INT3_INSN_SIZE + p->ainsn.size;
if (cond)
ip += p->ainsn.rel32;
ip += p->ainsn.rel32;
int3_emulate_jmp(regs, ip);
}
static void kprobe_emulate_jmp(struct kprobe *p, struct pt_regs *regs)
{
__kprobe_emulate_jmp(p, regs, true);
}
NOKPROBE_SYMBOL(kprobe_emulate_jmp);
static const unsigned long jcc_mask[6] = {
[0] = X86_EFLAGS_OF,
[1] = X86_EFLAGS_CF,
[2] = X86_EFLAGS_ZF,
[3] = X86_EFLAGS_CF | X86_EFLAGS_ZF,
[4] = X86_EFLAGS_SF,
[5] = X86_EFLAGS_PF,
};
static void kprobe_emulate_jcc(struct kprobe *p, struct pt_regs *regs)
{
bool invert = p->ainsn.jcc.type & 1;
bool match;
unsigned long ip = regs->ip - INT3_INSN_SIZE + p->ainsn.size;
if (p->ainsn.jcc.type < 0xc) {
match = regs->flags & jcc_mask[p->ainsn.jcc.type >> 1];
} else {
match = ((regs->flags & X86_EFLAGS_SF) >> X86_EFLAGS_SF_BIT) ^
((regs->flags & X86_EFLAGS_OF) >> X86_EFLAGS_OF_BIT);
if (p->ainsn.jcc.type >= 0xe)
match = match || (regs->flags & X86_EFLAGS_ZF);
}
__kprobe_emulate_jmp(p, regs, (match && !invert) || (!match && invert));
int3_emulate_jcc(regs, p->ainsn.jcc.type, ip, p->ainsn.rel32);
}
NOKPROBE_SYMBOL(kprobe_emulate_jcc);
static void kprobe_emulate_loop(struct kprobe *p, struct pt_regs *regs)
{
unsigned long ip = regs->ip - INT3_INSN_SIZE + p->ainsn.size;
bool match;
if (p->ainsn.loop.type != 3) { /* LOOP* */
@@ -542,7 +518,9 @@ static void kprobe_emulate_loop(struct kprobe *p, struct pt_regs *regs)
else if (p->ainsn.loop.type == 1) /* LOOPE */
match = match && (regs->flags & X86_EFLAGS_ZF);
__kprobe_emulate_jmp(p, regs, match);
if (match)
ip += p->ainsn.rel32;
int3_emulate_jmp(regs, ip);
}
NOKPROBE_SYMBOL(kprobe_emulate_loop);
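For the ip arithmetic shared by these emulation helpers: regs->ip points just
past the int3 when the trap fires, so the fall-through address is
regs->ip - INT3_INSN_SIZE + p->ainsn.size. A worked example with made-up
numbers (a 6-byte Jcc.d32 probed at 0x1000, rel32 = 0x20):

    #include <stdio.h>

    int main(void)
    {
            unsigned long probe_addr = 0x1000;      /* start of the probed insn */
            unsigned long trap_ip = probe_addr + 1; /* ip after the int3 fires  */
            unsigned long size = 6, rel32 = 0x20;

            unsigned long fallthrough = trap_ip - 1 + size; /* 0x1006 */
            unsigned long taken = fallthrough + rel32;      /* 0x1026 */

            printf("fall-through %#lx, taken %#lx\n", fallthrough, taken);
            return 0;
    }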

@@ -9,6 +9,7 @@ enum insn_type {
NOP = 1, /* site cond-call */
JMP = 2, /* tramp / site tail-call */
RET = 3, /* tramp / site cond-tail-call */
JCC = 4,
};
/*
@@ -25,12 +26,39 @@ static const u8 xor5rax[] = { 0x2e, 0x2e, 0x2e, 0x31, 0xc0 };
static const u8 retinsn[] = { RET_INSN_OPCODE, 0xcc, 0xcc, 0xcc, 0xcc };
static u8 __is_Jcc(u8 *insn) /* Jcc.d32 */
{
u8 ret = 0;
if (insn[0] == 0x0f) {
u8 tmp = insn[1];
if ((tmp & 0xf0) == 0x80)
ret = tmp;
}
return ret;
}
extern void __static_call_return(void);
asm (".global __static_call_return\n\t"
".type __static_call_return, @function\n\t"
"__static_call_return:\n\t"
ANNOTATE_NOENDBR
ANNOTATE_RETPOLINE_SAFE
"ret; int3\n\t"
".size __static_call_return, . - __static_call_return \n\t");
static void __ref __static_call_transform(void *insn, enum insn_type type,
void *func, bool modinit)
{
const void *emulate = NULL;
int size = CALL_INSN_SIZE;
const void *code;
u8 op, buf[6];
if ((type == JMP || type == RET) && (op = __is_Jcc(insn)))
type = JCC;
switch (type) {
case CALL:
@@ -56,6 +84,20 @@ static void __ref __static_call_transform(void *insn, enum insn_type type,
else
code = &retinsn;
break;
case JCC:
if (!func) {
func = __static_call_return;
if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
func = __x86_return_thunk;
}
buf[0] = 0x0f;
__text_gen_insn(buf+1, op, insn+1, func, 5);
code = buf;
size = 6;
break;
}
if (memcmp(insn, code, size) == 0)
@@ -67,9 +109,9 @@ static void __ref __static_call_transform(void *insn, enum insn_type type,
text_poke_bp(insn, code, size, emulate);
}
static void __static_call_validate(void *insn, bool tail, bool tramp)
static void __static_call_validate(u8 *insn, bool tail, bool tramp)
{
u8 opcode = *(u8 *)insn;
u8 opcode = insn[0];
if (tramp && memcmp(insn+5, tramp_ud, 3)) {
pr_err("trampoline signature fail");
@@ -78,7 +120,8 @@ static void __static_call_validate(void *insn, bool tail, bool tramp)
if (tail) {
if (opcode == JMP32_INSN_OPCODE ||
opcode == RET_INSN_OPCODE)
opcode == RET_INSN_OPCODE ||
__is_Jcc(insn))
return;
} else {
if (opcode == CALL_INSN_OPCODE ||
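__is_Jcc() above returns the second opcode byte only for the two-byte
0x0f 0x8x conditional-jump forms and 0 for everything else, which is why it
doubles as a predicate in the tail-call validation. A stand-alone check (a
sketch mirroring the helper):

    #include <stdio.h>

    typedef unsigned char u8;

    static u8 __is_Jcc(u8 *insn) /* Jcc.d32 */
    {
            u8 ret = 0;

            if (insn[0] == 0x0f) {
                    u8 tmp = insn[1];

                    if ((tmp & 0xf0) == 0x80)
                            ret = tmp;
            }
            return ret;
    }

    int main(void)
    {
            u8 jne_d32[6] = { 0x0f, 0x85, 0, 0, 0, 0 }; /* jne rel32: detected  */
            u8 jmp_d32[5] = { 0xe9, 0, 0, 0, 0 };       /* jmp rel32: not a Jcc */

            printf("%#x %#x\n", __is_Jcc(jne_d32), __is_Jcc(jmp_d32)); /* 0x85 0 */
            return 0;
    }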

@@ -3889,8 +3889,14 @@ static int svm_vcpu_pre_run(struct kvm_vcpu *vcpu)
static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
{
if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR &&
to_svm(vcpu)->vmcb->control.exit_info_1)
struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
/*
* Note, the next RIP must be provided as SRCU isn't held, i.e. KVM
* can't read guest memory (dereference memslots) to decode the WRMSR.
*/
if (control->exit_code == SVM_EXIT_MSR && control->exit_info_1 &&
nrips && control->next_rip)
return handle_fastpath_set_msr_irqoff(vcpu);
return EXIT_FASTPATH_NONE;

@@ -4767,6 +4767,17 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
vmx_switch_vmcs(vcpu, &vmx->vmcs01);
/*
* If IBRS is advertised to the vCPU, KVM must flush the indirect
* branch predictors when transitioning from L2 to L1, as L1 expects
* hardware (KVM in this case) to provide separate predictor modes.
* Bare metal isolates VMX root (host) from VMX non-root (guest), but
* doesn't isolate different VMCSs, i.e. in this case, doesn't provide
* separate modes for L2 vs L1.
*/
if (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
indirect_branch_prediction_barrier();
/* Update any VMCS fields that might have changed while L2 ran */
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);

@@ -1348,8 +1348,10 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
/*
* No indirect branch prediction barrier needed when switching
* the active VMCS within a guest, e.g. on nested VM-Enter.
* The L1 VMM can protect itself with retpolines, IBPB or IBRS.
* the active VMCS within a vCPU, unless IBRS is advertised to
* the vCPU. To minimize the number of IBPBs executed, KVM
* performs IBPB on nested VM-Exit (a single nested transition
* may switch the active VMCS multiple times).
*/
if (!buddy || WARN_ON_ONCE(buddy->vmcs != prev))
indirect_branch_prediction_barrier();

@@ -8860,7 +8860,9 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
write_fault_to_spt,
emulation_type))
return 1;
if (ctxt->have_exception) {
if (ctxt->have_exception &&
!(emulation_type & EMULTYPE_SKIP)) {
/*
* #UD should result in just EMULATION_FAILED, and trap-like
* exception should not be encountered during decode.

@@ -1757,18 +1757,42 @@ static int kvm_xen_eventfd_deassign(struct kvm *kvm, u32 port)
static int kvm_xen_eventfd_reset(struct kvm *kvm)
{
struct evtchnfd *evtchnfd;
struct evtchnfd *evtchnfd, **all_evtchnfds;
int i;
int n = 0;
mutex_lock(&kvm->lock);
/*
* Because synchronize_srcu() cannot be called inside the
* critical section, first collect all the evtchnfd objects
* in an array as they are removed from evtchn_ports.
*/
idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i)
n++;
all_evtchnfds = kmalloc_array(n, sizeof(struct evtchnfd *), GFP_KERNEL);
if (!all_evtchnfds) {
mutex_unlock(&kvm->lock);
return -ENOMEM;
}
n = 0;
idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i) {
all_evtchnfds[n++] = evtchnfd;
idr_remove(&kvm->arch.xen.evtchn_ports, evtchnfd->send_port);
synchronize_srcu(&kvm->srcu);
}
mutex_unlock(&kvm->lock);
synchronize_srcu(&kvm->srcu);
while (n--) {
evtchnfd = all_evtchnfds[n];
if (!evtchnfd->deliver.port.port)
eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx);
kfree(evtchnfd);
}
mutex_unlock(&kvm->lock);
kfree(all_evtchnfds);
return 0;
}

@@ -316,6 +316,90 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0489, 0xe0d0), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x10ab, 0x9108), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x10ab, 0x9109), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x10ab, 0x9208), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x10ab, 0x9209), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x10ab, 0x9308), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x10ab, 0x9408), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x10ab, 0x9508), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x10ab, 0x9509), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x10ab, 0x9608), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x10ab, 0x9609), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x10ab, 0x9f09), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x04ca, 0x3022), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x0489, 0xe0c7), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x0489, 0xe0c9), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x0489, 0xe0ca), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x0489, 0xe0cb), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x0489, 0xe0ce), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x0489, 0xe0de), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x0489, 0xe0df), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x0489, 0xe0e1), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x0489, 0xe0ea), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x0489, 0xe0ec), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x04ca, 0x3023), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x04ca, 0x3024), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x04ca, 0x3a22), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x04ca, 0x3a24), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x04ca, 0x3a26), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x04ca, 0x3a27), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
/* QCA WCN785x chipset */
{ USB_DEVICE(0x0cf3, 0xe700), .driver_info = BTUSB_QCA_WCN6855 |

@@ -1,8 +1,9 @@
# SPDX-License-Identifier: GPL-2.0-only
config CLK_LGM_CGU
depends on OF && HAS_IOMEM && (X86 || COMPILE_TEST)
select MFD_SYSCON
select OF_EARLY_FLATTREE
bool "Clock driver for Lightning Mountain(LGM) platform"
help
Clock Generation Unit(CGU) driver for Intel Lightning Mountain(LGM)
network processor SoC.
Clock Generation Unit(CGU) driver for MaxLinear's x86 based
Lightning Mountain(LGM) network processor SoC.

@@ -1,8 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020-2022 MaxLinear, Inc.
* Copyright (C) 2020 Intel Corporation.
* Zhu YiXin <yixin.zhu@intel.com>
* Rahul Tanwar <rahul.tanwar@intel.com>
* Zhu Yixin <yzhu@maxlinear.com>
* Rahul Tanwar <rtanwar@maxlinear.com>
*/
#include <linux/clk-provider.h>
@@ -40,13 +41,10 @@ static unsigned long lgm_pll_recalc_rate(struct clk_hw *hw, unsigned long prate)
{
struct lgm_clk_pll *pll = to_lgm_clk_pll(hw);
unsigned int div, mult, frac;
unsigned long flags;
spin_lock_irqsave(&pll->lock, flags);
mult = lgm_get_clk_val(pll->membase, PLL_REF_DIV(pll->reg), 0, 12);
div = lgm_get_clk_val(pll->membase, PLL_REF_DIV(pll->reg), 18, 6);
frac = lgm_get_clk_val(pll->membase, pll->reg, 2, 24);
spin_unlock_irqrestore(&pll->lock, flags);
if (pll->type == TYPE_LJPLL)
div *= 4;
@@ -57,12 +55,9 @@ static unsigned long lgm_pll_recalc_rate(struct clk_hw *hw, unsigned long prate)
static int lgm_pll_is_enabled(struct clk_hw *hw)
{
struct lgm_clk_pll *pll = to_lgm_clk_pll(hw);
unsigned long flags;
unsigned int ret;
spin_lock_irqsave(&pll->lock, flags);
ret = lgm_get_clk_val(pll->membase, pll->reg, 0, 1);
spin_unlock_irqrestore(&pll->lock, flags);
return ret;
}
@@ -70,15 +65,13 @@ static int lgm_pll_is_enabled(struct clk_hw *hw)
static int lgm_pll_enable(struct clk_hw *hw)
{
struct lgm_clk_pll *pll = to_lgm_clk_pll(hw);
unsigned long flags;
u32 val;
int ret;
spin_lock_irqsave(&pll->lock, flags);
lgm_set_clk_val(pll->membase, pll->reg, 0, 1, 1);
ret = readl_poll_timeout_atomic(pll->membase + pll->reg,
val, (val & 0x1), 1, 100);
spin_unlock_irqrestore(&pll->lock, flags);
ret = regmap_read_poll_timeout_atomic(pll->membase, pll->reg,
val, (val & 0x1), 1, 100);
return ret;
}
@@ -86,11 +79,8 @@ static int lgm_pll_enable(struct clk_hw *hw)
static void lgm_pll_disable(struct clk_hw *hw)
{
struct lgm_clk_pll *pll = to_lgm_clk_pll(hw);
unsigned long flags;
spin_lock_irqsave(&pll->lock, flags);
lgm_set_clk_val(pll->membase, pll->reg, 0, 1, 0);
spin_unlock_irqrestore(&pll->lock, flags);
}
static const struct clk_ops lgm_pll_ops = {
@@ -121,7 +111,6 @@ lgm_clk_register_pll(struct lgm_clk_provider *ctx,
return ERR_PTR(-ENOMEM);
pll->membase = ctx->membase;
pll->lock = ctx->lock;
pll->reg = list->reg;
pll->flags = list->flags;
pll->type = list->type;

@@ -1,8 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020-2022 MaxLinear, Inc.
* Copyright (C) 2020 Intel Corporation.
* Zhu YiXin <yixin.zhu@intel.com>
* Rahul Tanwar <rahul.tanwar@intel.com>
* Zhu Yixin <yzhu@maxlinear.com>
* Rahul Tanwar <rtanwar@maxlinear.com>
*/
#include <linux/clk-provider.h>
#include <linux/device.h>
@@ -24,14 +25,10 @@
static struct clk_hw *lgm_clk_register_fixed(struct lgm_clk_provider *ctx,
const struct lgm_clk_branch *list)
{
unsigned long flags;
if (list->div_flags & CLOCK_FLAG_VAL_INIT) {
spin_lock_irqsave(&ctx->lock, flags);
if (list->div_flags & CLOCK_FLAG_VAL_INIT)
lgm_set_clk_val(ctx->membase, list->div_off, list->div_shift,
list->div_width, list->div_val);
spin_unlock_irqrestore(&ctx->lock, flags);
}
return clk_hw_register_fixed_rate(NULL, list->name,
list->parent_data[0].name,
@@ -41,33 +38,27 @@ static struct clk_hw *lgm_clk_register_fixed(struct lgm_clk_provider *ctx,
static u8 lgm_clk_mux_get_parent(struct clk_hw *hw)
{
struct lgm_clk_mux *mux = to_lgm_clk_mux(hw);
unsigned long flags;
u32 val;
spin_lock_irqsave(&mux->lock, flags);
if (mux->flags & MUX_CLK_SW)
val = mux->reg;
else
val = lgm_get_clk_val(mux->membase, mux->reg, mux->shift,
mux->width);
spin_unlock_irqrestore(&mux->lock, flags);
return clk_mux_val_to_index(hw, NULL, mux->flags, val);
}
static int lgm_clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
struct lgm_clk_mux *mux = to_lgm_clk_mux(hw);
unsigned long flags;
u32 val;
val = clk_mux_index_to_val(NULL, mux->flags, index);
spin_lock_irqsave(&mux->lock, flags);
if (mux->flags & MUX_CLK_SW)
mux->reg = val;
else
lgm_set_clk_val(mux->membase, mux->reg, mux->shift,
mux->width, val);
spin_unlock_irqrestore(&mux->lock, flags);
return 0;
}
@@ -90,7 +81,7 @@ static struct clk_hw *
lgm_clk_register_mux(struct lgm_clk_provider *ctx,
const struct lgm_clk_branch *list)
{
unsigned long flags, cflags = list->mux_flags;
unsigned long cflags = list->mux_flags;
struct device *dev = ctx->dev;
u8 shift = list->mux_shift;
u8 width = list->mux_width;
@@ -111,7 +102,6 @@ lgm_clk_register_mux(struct lgm_clk_provider *ctx,
init.num_parents = list->num_parents;
mux->membase = ctx->membase;
mux->lock = ctx->lock;
mux->reg = reg;
mux->shift = shift;
mux->width = width;
@@ -123,11 +113,8 @@ lgm_clk_register_mux(struct lgm_clk_provider *ctx,
if (ret)
return ERR_PTR(ret);
if (cflags & CLOCK_FLAG_VAL_INIT) {
spin_lock_irqsave(&mux->lock, flags);
if (cflags & CLOCK_FLAG_VAL_INIT)
lgm_set_clk_val(mux->membase, reg, shift, width, list->mux_val);
spin_unlock_irqrestore(&mux->lock, flags);
}
return hw;
}
@@ -136,13 +123,10 @@
lgm_clk_divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
struct lgm_clk_divider *divider = to_lgm_clk_divider(hw);
unsigned long flags;
unsigned int val;
spin_lock_irqsave(&divider->lock, flags);
val = lgm_get_clk_val(divider->membase, divider->reg,
divider->shift, divider->width);
spin_unlock_irqrestore(&divider->lock, flags);
return divider_recalc_rate(hw, parent_rate, val, divider->table,
divider->flags, divider->width);
@@ -163,7 +147,6 @@
unsigned long prate)
{
struct lgm_clk_divider *divider = to_lgm_clk_divider(hw);
unsigned long flags;
int value;
value = divider_get_val(rate, prate, divider->table,
@@ -171,10 +154,8 @@ lgm_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
if (value < 0)
return value;
spin_lock_irqsave(&divider->lock, flags);
lgm_set_clk_val(divider->membase, divider->reg,
divider->shift, divider->width, value);
spin_unlock_irqrestore(&divider->lock, flags);
return 0;
}
@@ -182,12 +163,10 @@ lgm_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
static int lgm_clk_divider_enable_disable(struct clk_hw *hw, int enable)
{
struct lgm_clk_divider *div = to_lgm_clk_divider(hw);
unsigned long flags;
spin_lock_irqsave(&div->lock, flags);
lgm_set_clk_val(div->membase, div->reg, div->shift_gate,
div->width_gate, enable);
spin_unlock_irqrestore(&div->lock, flags);
if (div->flags != DIV_CLK_NO_MASK)
lgm_set_clk_val(div->membase, div->reg, div->shift_gate,
div->width_gate, enable);
return 0;
}
@@ -213,7 +192,7 @@ static struct clk_hw *
lgm_clk_register_divider(struct lgm_clk_provider *ctx,
const struct lgm_clk_branch *list)
{
unsigned long flags, cflags = list->div_flags;
unsigned long cflags = list->div_flags;
struct device *dev = ctx->dev;
struct lgm_clk_divider *div;
struct clk_init_data init = {};
@@ -236,7 +215,6 @@ lgm_clk_register_divider(struct lgm_clk_provider *ctx,
init.num_parents = 1;
div->membase = ctx->membase;
div->lock = ctx->lock;
div->reg = reg;
div->shift = shift;
div->width = width;
@@ -251,11 +229,8 @@ lgm_clk_register_divider(struct lgm_clk_provider *ctx,
if (ret)
return ERR_PTR(ret);
if (cflags & CLOCK_FLAG_VAL_INIT) {
spin_lock_irqsave(&div->lock, flags);
if (cflags & CLOCK_FLAG_VAL_INIT)
lgm_set_clk_val(div->membase, reg, shift, width, list->div_val);
spin_unlock_irqrestore(&div->lock, flags);
}
return hw;
}
@@ -264,7 +239,6 @@ static struct clk_hw *
lgm_clk_register_fixed_factor(struct lgm_clk_provider *ctx,
const struct lgm_clk_branch *list)
{
unsigned long flags;
struct clk_hw *hw;
hw = clk_hw_register_fixed_factor(ctx->dev, list->name,
@@ -273,12 +247,9 @@ lgm_clk_register_fixed_factor(struct lgm_clk_provider *ctx,
if (IS_ERR(hw))
return ERR_CAST(hw);
if (list->div_flags & CLOCK_FLAG_VAL_INIT) {
spin_lock_irqsave(&ctx->lock, flags);
if (list->div_flags & CLOCK_FLAG_VAL_INIT)
lgm_set_clk_val(ctx->membase, list->div_off, list->div_shift,
list->div_width, list->div_val);
spin_unlock_irqrestore(&ctx->lock, flags);
}
return hw;
}
@@ -286,13 +257,10 @@ lgm_clk_register_fixed_factor(struct lgm_clk_provider *ctx,
static int lgm_clk_gate_enable(struct clk_hw *hw)
{
struct lgm_clk_gate *gate = to_lgm_clk_gate(hw);
unsigned long flags;
unsigned int reg;
spin_lock_irqsave(&gate->lock, flags);
reg = GATE_HW_REG_EN(gate->reg);
lgm_set_clk_val(gate->membase, reg, gate->shift, 1, 1);
spin_unlock_irqrestore(&gate->lock, flags);
return 0;
}
@@ -300,25 +268,19 @@ static int lgm_clk_gate_enable(struct clk_hw *hw)
static void lgm_clk_gate_disable(struct clk_hw *hw)
{
struct lgm_clk_gate *gate = to_lgm_clk_gate(hw);
unsigned long flags;
unsigned int reg;
spin_lock_irqsave(&gate->lock, flags);
reg = GATE_HW_REG_DIS(gate->reg);
lgm_set_clk_val(gate->membase, reg, gate->shift, 1, 1);
spin_unlock_irqrestore(&gate->lock, flags);
}
static int lgm_clk_gate_is_enabled(struct clk_hw *hw)
{
struct lgm_clk_gate *gate = to_lgm_clk_gate(hw);
unsigned int reg, ret;
unsigned long flags;
spin_lock_irqsave(&gate->lock, flags);
reg = GATE_HW_REG_STAT(gate->reg);
ret = lgm_get_clk_val(gate->membase, reg, gate->shift, 1);
spin_unlock_irqrestore(&gate->lock, flags);
return ret;
}
@@ -333,7 +295,7 @@ static struct clk_hw *
lgm_clk_register_gate(struct lgm_clk_provider *ctx,
const struct lgm_clk_branch *list)
{
unsigned long flags, cflags = list->gate_flags;
unsigned long cflags = list->gate_flags;
const char *pname = list->parent_data[0].name;
struct device *dev = ctx->dev;
u8 shift = list->gate_shift;
@@ -354,7 +316,6 @@ lgm_clk_register_gate(struct lgm_clk_provider *ctx,
init.num_parents = pname ? 1 : 0;
gate->membase = ctx->membase;
gate->lock = ctx->lock;
gate->reg = reg;
gate->shift = shift;
gate->flags = cflags;
@@ -366,9 +327,7 @@ lgm_clk_register_gate(struct lgm_clk_provider *ctx,
return ERR_PTR(ret);
if (cflags & CLOCK_FLAG_VAL_INIT) {
spin_lock_irqsave(&gate->lock, flags);
lgm_set_clk_val(gate->membase, reg, shift, 1, list->gate_val);
spin_unlock_irqrestore(&gate->lock, flags);
}
return hw;
@@ -396,8 +355,22 @@ int lgm_clk_register_branches(struct lgm_clk_provider *ctx,
hw = lgm_clk_register_fixed_factor(ctx, list);
break;
case CLK_TYPE_GATE:
hw = lgm_clk_register_gate(ctx, list);
if (list->gate_flags & GATE_CLK_HW) {
hw = lgm_clk_register_gate(ctx, list);
} else {
/*
* GATE_CLKs can be controlled either from
* CGU clk driver i.e. this driver or directly
* from power management driver/daemon. It is
* dependent on the power policy/profile requirements
* of the end product. To override control of gate
* clks from this driver, provide NULL for this index
* of gate clk provider.
*/
hw = NULL;
}
break;
default:
dev_err(ctx->dev, "invalid clk type\n");
return -EINVAL;
@@ -443,24 +416,18 @@ lgm_clk_ddiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
static int lgm_clk_ddiv_enable(struct clk_hw *hw)
{
struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
unsigned long flags;
spin_lock_irqsave(&ddiv->lock, flags);
lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift_gate,
ddiv->width_gate, 1);
spin_unlock_irqrestore(&ddiv->lock, flags);
return 0;
}
static void lgm_clk_ddiv_disable(struct clk_hw *hw)
{
struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
unsigned long flags;
spin_lock_irqsave(&ddiv->lock, flags);
lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift_gate,
ddiv->width_gate, 0);
spin_unlock_irqrestore(&ddiv->lock, flags);
}
static int
@@ -497,32 +464,25 @@ lgm_clk_ddiv_set_rate(struct clk_hw *hw, unsigned long rate,
{
struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
u32 div, ddiv1, ddiv2;
unsigned long flags;
div = DIV_ROUND_CLOSEST_ULL((u64)prate, rate);
spin_lock_irqsave(&ddiv->lock, flags);
if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
div = DIV_ROUND_CLOSEST_ULL((u64)div, 5);
div = div * 2;
}
if (div <= 0) {
spin_unlock_irqrestore(&ddiv->lock, flags);
if (div <= 0)
return -EINVAL;
}
if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2)) {
spin_unlock_irqrestore(&ddiv->lock, flags);
if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2))
return -EINVAL;
}
lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift0, ddiv->width0,
ddiv1 - 1);
lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift1, ddiv->width1,
ddiv2 - 1);
spin_unlock_irqrestore(&ddiv->lock, flags);
return 0;
}
@@ -533,18 +493,15 @@ lgm_clk_ddiv_round_rate(struct clk_hw *hw, unsigned long rate,
{
struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
u32 div, ddiv1, ddiv2;
unsigned long flags;
u64 rate64;
div = DIV_ROUND_CLOSEST_ULL((u64)*prate, rate);
/* if predivide bit is enabled, modify div by factor of 2.5 */
spin_lock_irqsave(&ddiv->lock, flags);
if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
div = div * 2;
div = DIV_ROUND_CLOSEST_ULL((u64)div, 5);
}
spin_unlock_irqrestore(&ddiv->lock, flags);
if (div <= 0)
return *prate;
@@ -558,12 +515,10 @@ lgm_clk_ddiv_round_rate(struct clk_hw *hw, unsigned long rate,
do_div(rate64, ddiv2);
/* if predivide bit is enabled, modify rounded rate by factor of 2.5 */
spin_lock_irqsave(&ddiv->lock, flags);
if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {