mirror of git://git.yoctoproject.org/linux-yocto.git
synced 2025-10-23 07:23:12 +02:00
This is the 6.6.59 stable release

Merge tag 'v6.6.59' into v6.6/standard/base

This is the 6.6.59 stable release

# -----BEGIN PGP SIGNATURE-----
#
# iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmckKAsACgkQONu9yGCS
# aT5ctRAAoH0AgV4A6U8J2+RoY2aQW7hfCNdsC/jCvP6jGyjuJhJ7qGsqcn2c9qJk
# IzjuU0DYll52IJqU9Rt9+BikUTb9eZcRckCbFaoCNY3y3A1vNlZHjjIxhJVpODad
# oGJnnLmIiq0HuFGqFtfmvtLDD26kx0co5RHyeu897X1I97sWjp9Q7lKu3rSD2+6F
# XYKbB1iEJ+ZryCojq/rHG/CWXNae6t7P7rsbBMYMYAK/V+daFx3d0LdWdVh0zLL7
# RZSKxFokYuGxJhQjYXvj3WbIF8/gsQ4N8BdsFP+/sQLqi86EZnS47tVDL44Adn1W
# TTEkClc/LA0i4Gn7k0A98jVlNfChZpYnszqrDy8WSczVtQGwN6vNHtl/MsMfjceU
# dXPeDv2v1vpvbdUdrl9WMpzhGAru5T1KGlpsrN7iLoqWBD8ZXHA/aUvwihtUbgWf
# d/7rnE2uDTuLMRPcleVIDUwMLhrm5zXGwUHNevUH6ISyyGINa73qv5UGNBdD1sPS
# ZXcds+c3o5zzoVoAM/9FNqR4bkq5TB0ZmfKudPbEBAxpWhUglo6iXj4eBr/nwZGy
# Nl9Qywb4kK91RL5iUGhRc1fAsHOYEl5mvNaDqBB2WN2GIddXeOAcmjkTDytldKIS
# akQ6Nz0nzMurZXvy+vA3pdwMFssSkQnGh+3shp7f6CRDtKgWVSM=
# =eUGZ
# -----END PGP SIGNATURE-----
# gpg: Signature made Thu 31 Oct 2024 08:59:55 PM EDT
# gpg:                using RSA key 647F28654894E3BD457199BE38DBBDC86092693E
# gpg: Can't check signature: No public key
commit e558aca87e
@@ -102,21 +102,21 @@ properties:
     default: 2
 
   interrupts:
-    anyOf:
-      - minItems: 1
-        items:
-          - description: TX interrupt
-          - description: RX interrupt
-      - items:
-          - description: common/combined interrupt
+    minItems: 1
+    maxItems: 2
 
   interrupt-names:
     oneOf:
-      - minItems: 1
+      - description: TX interrupt
+        const: tx
+      - description: RX interrupt
+        const: rx
+      - description: TX and RX interrupts
         items:
           - const: tx
           - const: rx
-      - const: common
+      - description: Common/combined interrupt
+        const: common
 
   fck_parent:
     $ref: /schemas/types.yaml#/definitions/string
Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 6
 PATCHLEVEL = 6
-SUBLEVEL = 58
+SUBLEVEL = 59
 EXTRAVERSION =
 NAME = Pinguïn Aangedreven
@@ -77,7 +77,7 @@
 };
 
 &hdmi {
-	hpd-gpios = <&expgpio 1 GPIO_ACTIVE_LOW>;
+	hpd-gpios = <&expgpio 0 GPIO_ACTIVE_LOW>;
 	power-domains = <&power RPI_POWER_DOMAIN_HDMI>;
 	status = "okay";
 };
@@ -10,7 +10,7 @@
 #
 # Copyright (C) 1995-2001 by Russell King
 
-LDFLAGS_vmlinux :=--no-undefined -X
+LDFLAGS_vmlinux :=--no-undefined -X --pic-veneer
 
 ifeq ($(CONFIG_RELOCATABLE), y)
 # Pass --no-apply-dynamic-relocs to restore pre-binutils-2.27 behaviour
@@ -777,6 +777,9 @@ static int kvm_vcpu_suspend(struct kvm_vcpu *vcpu)
 static int check_vcpu_requests(struct kvm_vcpu *vcpu)
 {
 	if (kvm_request_pending(vcpu)) {
+		if (kvm_check_request(KVM_REQ_VM_DEAD, vcpu))
+			return -EIO;
+
 		if (kvm_check_request(KVM_REQ_SLEEP, vcpu))
 			kvm_vcpu_sleep(vcpu);
 
@@ -1708,7 +1708,7 @@ static u64 reset_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 	 * one cache line.
 	 */
 	if (kvm_has_mte(vcpu->kvm))
-		clidr |= 2 << CLIDR_TTYPE_SHIFT(loc);
+		clidr |= 2ULL << CLIDR_TTYPE_SHIFT(loc);
 
 	__vcpu_sys_reg(vcpu, r->reg) = clidr;
 
@@ -494,10 +494,10 @@ int kvm_vgic_map_resources(struct kvm *kvm)
 out:
 	mutex_unlock(&kvm->arch.config_lock);
 out_slots:
-	mutex_unlock(&kvm->slots_lock);
-
 	if (ret)
-		kvm_vgic_destroy(kvm);
+		kvm_vm_dead(kvm);
+
+	mutex_unlock(&kvm->slots_lock);
 
 	return ret;
 }
@@ -24,6 +24,10 @@ struct loongson_board_info {
 	const char *board_vendor;
 };
 
+/*
+ * The "core" of cores_per_node and cores_per_package stands for a
+ * logical core, which means in a SMT system it stands for a thread.
+ */
 struct loongson_system_configuration {
 	int nr_cpus;
 	int nr_nodes;
@@ -16,7 +16,7 @@
 #define XRANGE_SHIFT (48)
 
 /* Valid address length */
-#define XRANGE_SHADOW_SHIFT	(PGDIR_SHIFT + PAGE_SHIFT - 3)
+#define XRANGE_SHADOW_SHIFT	min(cpu_vabits, VA_BITS)
 /* Used for taking out the valid address */
 #define XRANGE_SHADOW_MASK	GENMASK_ULL(XRANGE_SHADOW_SHIFT - 1, 0)
 /* One segment whole address space size */
@@ -293,13 +293,15 @@ unsigned long stack_top(void)
 {
 	unsigned long top = TASK_SIZE & PAGE_MASK;
 
-	/* Space for the VDSO & data page */
-	top -= PAGE_ALIGN(current->thread.vdso->size);
-	top -= VVAR_SIZE;
+	if (current->thread.vdso) {
+		/* Space for the VDSO & data page */
+		top -= PAGE_ALIGN(current->thread.vdso->size);
+		top -= VVAR_SIZE;
 
-	/* Space to randomize the VDSO base */
-	if (current->flags & PF_RANDOMIZE)
-		top -= VDSO_RANDOMIZE_SIZE;
+		/* Space to randomize the VDSO base */
+		if (current->flags & PF_RANDOMIZE)
+			top -= VDSO_RANDOMIZE_SIZE;
+	}
 
 	return top;
 }
@@ -55,6 +55,7 @@
 #define SMBIOS_FREQHIGH_OFFSET		0x17
 #define SMBIOS_FREQLOW_MASK		0xFF
 #define SMBIOS_CORE_PACKAGE_OFFSET	0x23
+#define SMBIOS_THREAD_PACKAGE_OFFSET	0x25
 #define LOONGSON_EFI_ENABLE		(1 << 3)
 
 #ifdef CONFIG_EFI
@@ -129,7 +130,7 @@ static void __init parse_cpu_table(const struct dmi_header *dm)
 	cpu_clock_freq = freq_temp * 1000000;
 
 	loongson_sysconf.cpuname = (void *)dmi_string_parse(dm, dmi_data[16]);
-	loongson_sysconf.cores_per_package = *(dmi_data + SMBIOS_CORE_PACKAGE_OFFSET);
+	loongson_sysconf.cores_per_package = *(dmi_data + SMBIOS_THREAD_PACKAGE_OFFSET);
 
 	pr_info("CpuClock = %llu\n", cpu_clock_freq);
 }
@@ -529,6 +529,9 @@ asmlinkage void noinstr do_ale(struct pt_regs *regs)
 #else
 	unsigned int *pc;
 
+	if (regs->csr_prmd & CSR_PRMD_PIE)
+		local_irq_enable();
+
 	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, regs->csr_badvaddr);
 
 	/*
@@ -553,6 +556,8 @@ sigbus:
 	die_if_kernel("Kernel ale access", regs);
 	force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
 out:
+	if (regs->csr_prmd & CSR_PRMD_PIE)
+		local_irq_disable();
 #endif
 	irqentry_exit(regs, state);
 }
@@ -555,8 +555,8 @@ static void emit_atomic(u8 rd, u8 rs, s16 off, s32 imm, bool is64,
 		     rv_lr_w(r0, 0, rd, 0, 0), ctx);
 		jmp_offset = ninsns_rvoff(8);
 		emit(rv_bne(RV_REG_T2, r0, jmp_offset >> 1), ctx);
-		emit(is64 ? rv_sc_d(RV_REG_T3, rs, rd, 0, 0) :
-		     rv_sc_w(RV_REG_T3, rs, rd, 0, 0), ctx);
+		emit(is64 ? rv_sc_d(RV_REG_T3, rs, rd, 0, 1) :
+		     rv_sc_w(RV_REG_T3, rs, rd, 0, 1), ctx);
 		jmp_offset = ninsns_rvoff(-6);
 		emit(rv_bne(RV_REG_T3, 0, jmp_offset >> 1), ctx);
 		emit(rv_fence(0x3, 0x3), ctx);
@@ -73,6 +73,7 @@ struct perf_sf_sde_regs {
 #define SAMPLE_FREQ_MODE(hwc)	(SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FREQ_MODE)
 
 #define perf_arch_fetch_caller_regs(regs, __ip) do {			\
+	(regs)->psw.mask = 0;						\
 	(regs)->psw.addr = (__ip);					\
 	(regs)->gprs[15] = (unsigned long)__builtin_frame_address(0) -	\
 		offsetof(struct stack_frame, back_chain);		\
@@ -272,18 +272,19 @@ static void __zpci_event_error(struct zpci_ccdf_err *ccdf)
 		goto no_pdev;
 
 	switch (ccdf->pec) {
-	case 0x003a: /* Service Action or Error Recovery Successful */
+	case 0x002a: /* Error event concerns FMB */
+	case 0x002b:
+	case 0x002c:
+		break;
+	case 0x0040: /* Service Action or Error Recovery Failed */
+	case 0x003b:
+		zpci_event_io_failure(pdev, pci_channel_io_perm_failure);
+		break;
+	default: /* PCI function left in the error state attempt to recover */
 		ers_res = zpci_event_attempt_error_recovery(pdev);
 		if (ers_res != PCI_ERS_RESULT_RECOVERED)
 			zpci_event_io_failure(pdev, pci_channel_io_perm_failure);
 		break;
-	default:
-		/*
-		 * Mark as frozen not permanently failed because the device
-		 * could be subsequently recovered by the platform.
-		 */
-		zpci_event_io_failure(pdev, pci_channel_io_frozen);
-		break;
 	}
 	pci_dev_put(pdev);
 no_pdev:
@@ -2223,6 +2223,7 @@ config RANDOMIZE_MEMORY_PHYSICAL_PADDING
 config ADDRESS_MASKING
 	bool "Linear Address Masking support"
 	depends on X86_64
+	depends on COMPILE_TEST || !CPU_MITIGATIONS # wait for LASS
 	help
 	  Linear Address Masking (LAM) modifies the checking that is applied
 	  to 64-bit linear addresses, allowing software to use of the
@@ -27,10 +27,10 @@
  * hardware. The allocated bandwidth percentage is rounded to the next
  * control step available on the hardware.
  */
-static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r)
+static bool bw_validate(char *buf, u32 *data, struct rdt_resource *r)
 {
-	unsigned long bw;
 	int ret;
+	u32 bw;
 
 	/*
 	 * Only linear delay values is supported for current Intel SKUs.
@@ -40,16 +40,21 @@ static bool bw_validate(char *buf, u32 *data, struct rdt_resource *r)
 		return false;
 	}
 
-	ret = kstrtoul(buf, 10, &bw);
+	ret = kstrtou32(buf, 10, &bw);
 	if (ret) {
-		rdt_last_cmd_printf("Non-decimal digit in MB value %s\n", buf);
+		rdt_last_cmd_printf("Invalid MB value %s\n", buf);
 		return false;
 	}
 
-	if ((bw < r->membw.min_bw || bw > r->default_ctrl) &&
-	    !is_mba_sc(r)) {
-		rdt_last_cmd_printf("MB value %ld out of range [%d,%d]\n", bw,
-				    r->membw.min_bw, r->default_ctrl);
+	/* Nothing else to do if software controller is enabled. */
+	if (is_mba_sc(r)) {
+		*data = bw;
+		return true;
+	}
+
+	if (bw < r->membw.min_bw || bw > r->default_ctrl) {
+		rdt_last_cmd_printf("MB value %u out of range [%d,%d]\n",
+				    bw, r->membw.min_bw, r->default_ctrl);
 		return false;
 	}
 
@@ -63,7 +68,7 @@ int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s,
 	struct resctrl_staged_config *cfg;
 	u32 closid = data->rdtgrp->closid;
 	struct rdt_resource *r = s->res;
-	unsigned long bw_val;
+	u32 bw_val;
 
 	cfg = &d->staged_config[s->conf_type];
 	if (cfg->have_new_ctrl) {
@@ -63,8 +63,12 @@ static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
 	u64 pdpte;
 	int ret;
 
+	/*
+	 * Note, nCR3 is "assumed" to be 32-byte aligned, i.e. the CPU ignores
+	 * nCR3[4:0] when loading PDPTEs from memory.
+	 */
 	ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(cr3), &pdpte,
-				       offset_in_page(cr3) + index * 8, 8);
+				       (cr3 & GENMASK(11, 5)) + index * 8, 8);
 	if (ret)
 		return 0;
 	return pdpte;
@@ -496,7 +496,7 @@ static int encode_addr_size_pairs(struct dma_xfer *xfer, struct wrapper_list *wr
 	nents = sgt->nents;
 	nents_dma = nents;
 	*size = QAIC_MANAGE_EXT_MSG_LENGTH - msg_hdr_len - sizeof(**out_trans);
-	for_each_sgtable_sg(sgt, sg, i) {
+	for_each_sgtable_dma_sg(sgt, sg, i) {
 		*size -= sizeof(*asp);
 		/* Save 1K for possible follow-up transactions. */
 		if (*size < SZ_1K) {
@@ -177,7 +177,7 @@ static int clone_range_of_sgt_for_slice(struct qaic_device *qdev, struct sg_tabl
 	nents = 0;
 
 	size = size ? size : PAGE_SIZE;
-	for (sg = sgt_in->sgl; sg; sg = sg_next(sg)) {
+	for_each_sgtable_dma_sg(sgt_in, sg, j) {
 		len = sg_dma_len(sg);
 
 		if (!len)
@@ -214,7 +214,7 @@ static int clone_range_of_sgt_for_slice(struct qaic_device *qdev, struct sg_tabl
 
 	/* copy relevant sg node and fix page and length */
 	sgn = sgf;
-	for_each_sgtable_sg(sgt, sg, j) {
+	for_each_sgtable_dma_sg(sgt, sg, j) {
 		memcpy(sg, sgn, sizeof(*sg));
 		if (sgn == sgf) {
 			sg_dma_address(sg) += offf;
@@ -294,7 +294,7 @@ static int encode_reqs(struct qaic_device *qdev, struct bo_slice *slice,
 	 * fence.
 	 */
 	dev_addr = req->dev_addr;
-	for_each_sgtable_sg(slice->sgt, sg, i) {
+	for_each_sgtable_dma_sg(slice->sgt, sg, i) {
 		slice->reqs[i].cmd = cmd;
 		slice->reqs[i].src_addr = cpu_to_le64(slice->dir == DMA_TO_DEVICE ?
 						      sg_dma_address(sg) : dev_addr);
@@ -130,6 +130,17 @@ static const struct dmi_system_id dmi_lid_quirks[] = {
 		},
 		.driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
 	},
+	{
+		/*
+		 * Samsung galaxybook2 ,initial _LID device notification returns
+		 * lid closed.
+		 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "750XED"),
+		},
+		.driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
+	},
 	{}
 };
 
@@ -39,6 +39,9 @@
 #include <linux/rwsem.h>
 #include <linux/wait.h>
 #include <linux/topology.h>
+#include <linux/dmi.h>
+#include <linux/units.h>
+#include <asm/unaligned.h>
 
 #include <acpi/cppc_acpi.h>
 
|
|||
return latency_ns;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(cppc_get_transition_latency);
|
||||
|
||||
/* Minimum struct length needed for the DMI processor entry we want */
|
||||
#define DMI_ENTRY_PROCESSOR_MIN_LENGTH 48
|
||||
|
||||
/* Offset in the DMI processor structure for the max frequency */
|
||||
#define DMI_PROCESSOR_MAX_SPEED 0x14
|
||||
|
||||
/* Callback function used to retrieve the max frequency from DMI */
|
||||
static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
|
||||
{
|
||||
const u8 *dmi_data = (const u8 *)dm;
|
||||
u16 *mhz = (u16 *)private;
|
||||
|
||||
if (dm->type == DMI_ENTRY_PROCESSOR &&
|
||||
dm->length >= DMI_ENTRY_PROCESSOR_MIN_LENGTH) {
|
||||
u16 val = (u16)get_unaligned((const u16 *)
|
||||
(dmi_data + DMI_PROCESSOR_MAX_SPEED));
|
||||
*mhz = val > *mhz ? val : *mhz;
|
||||
}
|
||||
}
|
||||
|
||||
/* Look up the max frequency in DMI */
|
||||
static u64 cppc_get_dmi_max_khz(void)
|
||||
{
|
||||
u16 mhz = 0;
|
||||
|
||||
dmi_walk(cppc_find_dmi_mhz, &mhz);
|
||||
|
||||
/*
|
||||
* Real stupid fallback value, just in case there is no
|
||||
* actual value set.
|
||||
*/
|
||||
mhz = mhz ? mhz : 1;
|
||||
|
||||
return KHZ_PER_MHZ * mhz;
|
||||
}
|
||||
|
||||
/*
|
||||
* If CPPC lowest_freq and nominal_freq registers are exposed then we can
|
||||
* use them to convert perf to freq and vice versa. The conversion is
|
||||
* extrapolated as an affine function passing by the 2 points:
|
||||
* - (Low perf, Low freq)
|
||||
* - (Nominal perf, Nominal freq)
|
||||
*/
|
||||
unsigned int cppc_perf_to_khz(struct cppc_perf_caps *caps, unsigned int perf)
|
||||
{
|
||||
s64 retval, offset = 0;
|
||||
static u64 max_khz;
|
||||
u64 mul, div;
|
||||
|
||||
if (caps->lowest_freq && caps->nominal_freq) {
|
||||
/* Avoid special case when nominal_freq is equal to lowest_freq */
|
||||
if (caps->lowest_freq == caps->nominal_freq) {
|
||||
mul = caps->nominal_freq;
|
||||
div = caps->nominal_perf;
|
||||
} else {
|
||||
mul = caps->nominal_freq - caps->lowest_freq;
|
||||
div = caps->nominal_perf - caps->lowest_perf;
|
||||
}
|
||||
mul *= KHZ_PER_MHZ;
|
||||
offset = caps->nominal_freq * KHZ_PER_MHZ -
|
||||
div64_u64(caps->nominal_perf * mul, div);
|
||||
} else {
|
||||
if (!max_khz)
|
||||
max_khz = cppc_get_dmi_max_khz();
|
||||
mul = max_khz;
|
||||
div = caps->highest_perf;
|
||||
}
|
||||
|
||||
retval = offset + div64_u64(perf * mul, div);
|
||||
if (retval >= 0)
|
||||
return retval;
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(cppc_perf_to_khz);
|
||||
|
||||
unsigned int cppc_khz_to_perf(struct cppc_perf_caps *caps, unsigned int freq)
|
||||
{
|
||||
s64 retval, offset = 0;
|
||||
static u64 max_khz;
|
||||
u64 mul, div;
|
||||
|
||||
if (caps->lowest_freq && caps->nominal_freq) {
|
||||
/* Avoid special case when nominal_freq is equal to lowest_freq */
|
||||
if (caps->lowest_freq == caps->nominal_freq) {
|
||||
mul = caps->nominal_perf;
|
||||
div = caps->nominal_freq;
|
||||
} else {
|
||||
mul = caps->nominal_perf - caps->lowest_perf;
|
||||
div = caps->nominal_freq - caps->lowest_freq;
|
||||
}
|
||||
/*
|
||||
* We don't need to convert to kHz for computing offset and can
|
||||
* directly use nominal_freq and lowest_freq as the div64_u64
|
||||
* will remove the frequency unit.
|
||||
*/
|
||||
offset = caps->nominal_perf -
|
||||
div64_u64(caps->nominal_freq * mul, div);
|
||||
/* But we need it for computing the perf level. */
|
||||
div *= KHZ_PER_MHZ;
|
||||
} else {
|
||||
if (!max_khz)
|
||||
max_khz = cppc_get_dmi_max_khz();
|
||||
mul = caps->highest_perf;
|
||||
div = max_khz;
|
||||
}
|
||||
|
||||
retval = offset + div64_u64(freq * mul, div);
|
||||
if (retval >= 0)
|
||||
return retval;
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(cppc_khz_to_perf);
|
||||
|
|
|
@@ -52,7 +52,7 @@ struct prm_context_buffer {
 static LIST_HEAD(prm_module_list);
 
 struct prm_handler_info {
-	guid_t guid;
+	efi_guid_t guid;
 	efi_status_t (__efiapi *handler_addr)(u64, void *);
 	u64 static_data_buffer_addr;
 	u64 acpi_param_buffer_addr;
@@ -72,17 +72,21 @@ struct prm_module_info {
 	struct prm_handler_info handlers[];
 };
 
-static u64 efi_pa_va_lookup(u64 pa)
+static u64 efi_pa_va_lookup(efi_guid_t *guid, u64 pa)
 {
 	efi_memory_desc_t *md;
 	u64 pa_offset = pa & ~PAGE_MASK;
 	u64 page = pa & PAGE_MASK;
 
 	for_each_efi_memory_desc(md) {
-		if (md->phys_addr < pa && pa < md->phys_addr + PAGE_SIZE * md->num_pages)
+		if ((md->attribute & EFI_MEMORY_RUNTIME) &&
+		    (md->phys_addr < pa && pa < md->phys_addr + PAGE_SIZE * md->num_pages)) {
 			return pa_offset + md->virt_addr + page - md->phys_addr;
+		}
 	}
 
+	pr_warn("Failed to find VA for GUID: %pUL, PA: 0x%llx", guid, pa);
+
 	return 0;
 }
 
@@ -148,9 +152,15 @@ acpi_parse_prmt(union acpi_subtable_headers *header, const unsigned long end)
 		th = &tm->handlers[cur_handler];
 
 		guid_copy(&th->guid, (guid_t *)handler_info->handler_guid);
-		th->handler_addr = (void *)efi_pa_va_lookup(handler_info->handler_address);
-		th->static_data_buffer_addr = efi_pa_va_lookup(handler_info->static_data_buffer_address);
-		th->acpi_param_buffer_addr = efi_pa_va_lookup(handler_info->acpi_param_buffer_address);
+		th->handler_addr =
+			(void *)efi_pa_va_lookup(&th->guid, handler_info->handler_address);
+
+		th->static_data_buffer_addr =
+			efi_pa_va_lookup(&th->guid, handler_info->static_data_buffer_address);
+
+		th->acpi_param_buffer_addr =
+			efi_pa_va_lookup(&th->guid, handler_info->acpi_param_buffer_address);
+
 	} while (++cur_handler < tm->handler_count && (handler_info = get_next_handler(handler_info)));
 
 	return 0;
@@ -253,6 +263,13 @@ static acpi_status acpi_platformrt_space_handler(u32 function,
 		if (!handler || !module)
 			goto invalid_guid;
 
+		if (!handler->handler_addr ||
+		    !handler->static_data_buffer_addr ||
+		    !handler->acpi_param_buffer_addr) {
+			buffer->prm_status = PRM_HANDLER_ERROR;
+			return AE_OK;
+		}
+
 		ACPI_COPY_NAMESEG(context.signature, "PRMC");
 		context.revision = 0x0;
 		context.reserved = 0x0;
@@ -498,6 +498,13 @@ static const struct dmi_system_id tongfang_gm_rg[] = {
 			DMI_MATCH(DMI_BOARD_NAME, "GMxRGxx"),
 		},
 	},
+	{
+		/* LG Electronics 16T90SP */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
+			DMI_MATCH(DMI_BOARD_NAME, "16T90SP"),
+		},
+	},
 	{ }
 };
 
@@ -636,6 +636,7 @@ void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
 			/* the scmd has an associated qc */
 			if (!(qc->flags & ATA_QCFLAG_EH)) {
 				/* which hasn't failed yet, timeout */
+				set_host_byte(scmd, DID_TIME_OUT);
 				qc->err_mask |= AC_ERR_TIMEOUT;
 				qc->flags |= ATA_QCFLAG_EH;
 				nr_timedout++;
@@ -2313,7 +2313,7 @@ static int cdrom_ioctl_media_changed(struct cdrom_device_info *cdi,
 		return -EINVAL;
 
 	/* Prevent arg from speculatively bypassing the length check */
-	barrier_nospec();
+	arg = array_index_nospec(arg, cdi->capacity);
 
 	info = kmalloc(sizeof(*info), GFP_KERNEL);
 	if (!info)
@@ -1061,11 +1061,21 @@ static int amd_pstate_register_driver(int mode)
 		return -EINVAL;
 
 	cppc_state = mode;
+
+	ret = amd_pstate_enable(true);
+	if (ret) {
+		pr_err("failed to enable cppc during amd-pstate driver registration, return %d\n",
+		       ret);
+		amd_pstate_driver_cleanup();
+		return ret;
+	}
+
 	ret = cpufreq_register_driver(current_pstate_driver);
 	if (ret) {
 		amd_pstate_driver_cleanup();
 		return ret;
 	}
 
 	return 0;
 }
 
@@ -16,7 +16,6 @@
 #include <linux/delay.h>
 #include <linux/cpu.h>
 #include <linux/cpufreq.h>
-#include <linux/dmi.h>
 #include <linux/irq_work.h>
 #include <linux/kthread.h>
 #include <linux/time.h>
@@ -27,12 +26,6 @@
 
 #include <acpi/cppc_acpi.h>
 
-/* Minimum struct length needed for the DMI processor entry we want */
-#define DMI_ENTRY_PROCESSOR_MIN_LENGTH	48
-
-/* Offset in the DMI processor structure for the max frequency */
-#define DMI_PROCESSOR_MAX_SPEED		0x14
-
 /*
  * This list contains information parsed from per CPU ACPI _CPC and _PSD
  * structures: e.g. the highest and lowest supported performance, capabilities,
@@ -291,97 +284,9 @@ static inline void cppc_freq_invariance_exit(void)
 }
 #endif /* CONFIG_ACPI_CPPC_CPUFREQ_FIE */
 
-/* Callback function used to retrieve the max frequency from DMI */
-static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
-{
-	const u8 *dmi_data = (const u8 *)dm;
-	u16 *mhz = (u16 *)private;
-
-	if (dm->type == DMI_ENTRY_PROCESSOR &&
-	    dm->length >= DMI_ENTRY_PROCESSOR_MIN_LENGTH) {
-		u16 val = (u16)get_unaligned((const u16 *)
-				(dmi_data + DMI_PROCESSOR_MAX_SPEED));
-		*mhz = val > *mhz ? val : *mhz;
-	}
-}
-
-/* Look up the max frequency in DMI */
-static u64 cppc_get_dmi_max_khz(void)
-{
-	u16 mhz = 0;
-
-	dmi_walk(cppc_find_dmi_mhz, &mhz);
-
-	/*
-	 * Real stupid fallback value, just in case there is no
-	 * actual value set.
-	 */
-	mhz = mhz ? mhz : 1;
-
-	return (1000 * mhz);
-}
-
-/*
- * If CPPC lowest_freq and nominal_freq registers are exposed then we can
- * use them to convert perf to freq and vice versa. The conversion is
- * extrapolated as an affine function passing by the 2 points:
- * - (Low perf, Low freq)
- * - (Nominal perf, Nominal perf)
- */
-static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu_data,
-					     unsigned int perf)
-{
-	struct cppc_perf_caps *caps = &cpu_data->perf_caps;
-	s64 retval, offset = 0;
-	static u64 max_khz;
-	u64 mul, div;
-
-	if (caps->lowest_freq && caps->nominal_freq) {
-		mul = caps->nominal_freq - caps->lowest_freq;
-		div = caps->nominal_perf - caps->lowest_perf;
-		offset = caps->nominal_freq - div64_u64(caps->nominal_perf * mul, div);
-	} else {
-		if (!max_khz)
-			max_khz = cppc_get_dmi_max_khz();
-		mul = max_khz;
-		div = caps->highest_perf;
-	}
-
-	retval = offset + div64_u64(perf * mul, div);
-	if (retval >= 0)
-		return retval;
-	return 0;
-}
-
-static unsigned int cppc_cpufreq_khz_to_perf(struct cppc_cpudata *cpu_data,
-					     unsigned int freq)
-{
-	struct cppc_perf_caps *caps = &cpu_data->perf_caps;
-	s64 retval, offset = 0;
-	static u64 max_khz;
-	u64 mul, div;
-
-	if (caps->lowest_freq && caps->nominal_freq) {
-		mul = caps->nominal_perf - caps->lowest_perf;
-		div = caps->nominal_freq - caps->lowest_freq;
-		offset = caps->nominal_perf - div64_u64(caps->nominal_freq * mul, div);
-	} else {
-		if (!max_khz)
-			max_khz = cppc_get_dmi_max_khz();
-		mul = caps->highest_perf;
-		div = max_khz;
-	}
-
-	retval = offset + div64_u64(freq * mul, div);
-	if (retval >= 0)
-		return retval;
-	return 0;
-}
-
 static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
 				   unsigned int target_freq,
 				   unsigned int relation)
-
 {
 	struct cppc_cpudata *cpu_data = policy->driver_data;
 	unsigned int cpu = policy->cpu;
@@ -389,7 +294,7 @@ static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
 	u32 desired_perf;
 	int ret = 0;
 
-	desired_perf = cppc_cpufreq_khz_to_perf(cpu_data, target_freq);
+	desired_perf = cppc_khz_to_perf(&cpu_data->perf_caps, target_freq);
 	/* Return if it is exactly the same perf */
 	if (desired_perf == cpu_data->perf_ctrls.desired_perf)
 		return ret;
@@ -417,7 +322,7 @@ static unsigned int cppc_cpufreq_fast_switch(struct cpufreq_policy *policy,
 	u32 desired_perf;
 	int ret;
 
-	desired_perf = cppc_cpufreq_khz_to_perf(cpu_data, target_freq);
+	desired_perf = cppc_khz_to_perf(&cpu_data->perf_caps, target_freq);
 	cpu_data->perf_ctrls.desired_perf = desired_perf;
 	ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
 
@@ -530,7 +435,7 @@ static int cppc_get_cpu_power(struct device *cpu_dev,
 	min_step = min_cap / CPPC_EM_CAP_STEP;
 	max_step = max_cap / CPPC_EM_CAP_STEP;
 
-	perf_prev = cppc_cpufreq_khz_to_perf(cpu_data, *KHz);
+	perf_prev = cppc_khz_to_perf(perf_caps, *KHz);
 	step = perf_prev / perf_step;
 
 	if (step > max_step)
@@ -550,8 +455,8 @@ static int cppc_get_cpu_power(struct device *cpu_dev,
 		perf = step * perf_step;
 	}
 
-	*KHz = cppc_cpufreq_perf_to_khz(cpu_data, perf);
-	perf_check = cppc_cpufreq_khz_to_perf(cpu_data, *KHz);
+	*KHz = cppc_perf_to_khz(perf_caps, perf);
+	perf_check = cppc_khz_to_perf(perf_caps, *KHz);
 	step_check = perf_check / perf_step;
 
 	/*
@@ -561,8 +466,8 @@ static int cppc_get_cpu_power(struct device *cpu_dev,
 	 */
 	while ((*KHz == prev_freq) || (step_check != step)) {
 		perf++;
-		*KHz = cppc_cpufreq_perf_to_khz(cpu_data, perf);
-		perf_check = cppc_cpufreq_khz_to_perf(cpu_data, *KHz);
+		*KHz = cppc_perf_to_khz(perf_caps, perf);
+		perf_check = cppc_khz_to_perf(perf_caps, *KHz);
 		step_check = perf_check / perf_step;
 	}
 
@@ -591,7 +496,7 @@ static int cppc_get_cpu_cost(struct device *cpu_dev, unsigned long KHz,
 	perf_caps = &cpu_data->perf_caps;
 	max_cap = arch_scale_cpu_capacity(cpu_dev->id);
 
-	perf_prev = cppc_cpufreq_khz_to_perf(cpu_data, KHz);
+	perf_prev = cppc_khz_to_perf(perf_caps, KHz);
 	perf_step = CPPC_EM_CAP_STEP * perf_caps->highest_perf / max_cap;
 	step = perf_prev / perf_step;
 
@@ -679,10 +584,6 @@ static struct cppc_cpudata *cppc_cpufreq_get_cpu_data(unsigned int cpu)
 		goto free_mask;
 	}
 
-	/* Convert the lowest and nominal freq from MHz to KHz */
-	cpu_data->perf_caps.lowest_freq *= 1000;
-	cpu_data->perf_caps.nominal_freq *= 1000;
-
 	list_add(&cpu_data->node, &cpu_data_list);
 
 	return cpu_data;
@@ -724,20 +625,16 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	 * Set min to lowest nonlinear perf to avoid any efficiency penalty (see
 	 * Section 8.4.7.1.1.5 of ACPI 6.1 spec)
 	 */
-	policy->min = cppc_cpufreq_perf_to_khz(cpu_data,
-					       caps->lowest_nonlinear_perf);
-	policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
-					       caps->nominal_perf);
+	policy->min = cppc_perf_to_khz(caps, caps->lowest_nonlinear_perf);
+	policy->max = cppc_perf_to_khz(caps, caps->nominal_perf);
 
 	/*
 	 * Set cpuinfo.min_freq to Lowest to make the full range of performance
 	 * available if userspace wants to use any perf between lowest & lowest
 	 * nonlinear perf
 	 */
-	policy->cpuinfo.min_freq = cppc_cpufreq_perf_to_khz(cpu_data,
-							    caps->lowest_perf);
-	policy->cpuinfo.max_freq = cppc_cpufreq_perf_to_khz(cpu_data,
-							    caps->nominal_perf);
+	policy->cpuinfo.min_freq = cppc_perf_to_khz(caps, caps->lowest_perf);
+	policy->cpuinfo.max_freq = cppc_perf_to_khz(caps, caps->nominal_perf);
 
 	policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu);
 	policy->shared_type = cpu_data->shared_type;
@@ -773,7 +670,7 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
 		boost_supported = true;
 
 	/* Set policy->cur to max now. The governors will adjust later. */
-	policy->cur = cppc_cpufreq_perf_to_khz(cpu_data, caps->highest_perf);
+	policy->cur = cppc_perf_to_khz(caps, caps->highest_perf);
 	cpu_data->perf_ctrls.desired_perf = caps->highest_perf;
 
 	ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
@@ -868,7 +765,7 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
 	delivered_perf = cppc_perf_from_fbctrs(cpu_data, &fb_ctrs_t0,
 					       &fb_ctrs_t1);
 
-	return cppc_cpufreq_perf_to_khz(cpu_data, delivered_perf);
+	return cppc_perf_to_khz(&cpu_data->perf_caps, delivered_perf);
 }
 
 static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state)
@@ -883,11 +780,9 @@ static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state)
 	}
 
 	if (state)
-		policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
-						       caps->highest_perf);
+		policy->max = cppc_perf_to_khz(caps, caps->highest_perf);
 	else
-		policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
-						       caps->nominal_perf);
+		policy->max = cppc_perf_to_khz(caps, caps->nominal_perf);
 	policy->cpuinfo.max_freq = policy->max;
 
 	ret = freq_qos_update_request(policy->max_freq_req, policy->max);
@@ -947,7 +842,7 @@ static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu)
 	if (ret < 0)
 		return -EIO;
 
-	return cppc_cpufreq_perf_to_khz(cpu_data, desired_perf);
+	return cppc_perf_to_khz(&cpu_data->perf_caps, desired_perf);
 }
 
 static void cppc_check_hisi_workaround(void)
@@ -2603,10 +2603,8 @@ static struct scmi_debug_info *scmi_debugfs_common_setup(struct scmi_info *info)
 	dbg->top_dentry = top_dentry;
 
 	if (devm_add_action_or_reset(info->dev,
-				     scmi_debugfs_common_cleanup, dbg)) {
-		scmi_debugfs_common_cleanup(dbg);
+				     scmi_debugfs_common_cleanup, dbg))
 		return NULL;
-	}
 
 	return dbg;
 }
@@ -23,6 +23,7 @@
  * @chan_receiver: Optional Receiver mailbox unidirectional channel
  * @cinfo: SCMI channel info
  * @shmem: Transmit/Receive shared memory area
+ * @chan_lock: Lock that prevents multiple xfers from being queued
  */
 struct scmi_mailbox {
 	struct mbox_client cl;
@@ -30,6 +31,7 @@ struct scmi_mailbox {
 	struct mbox_chan *chan_receiver;
 	struct scmi_chan_info *cinfo;
 	struct scmi_shared_mem __iomem *shmem;
+	struct mutex chan_lock;
 };
 
 #define client_to_scmi_mailbox(c) container_of(c, struct scmi_mailbox, cl)
@@ -228,6 +230,7 @@ static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
 
 	cinfo->transport_info = smbox;
 	smbox->cinfo = cinfo;
+	mutex_init(&smbox->chan_lock);
 
 	return 0;
 }
@@ -255,13 +258,23 @@ static int mailbox_send_message(struct scmi_chan_info *cinfo,
 	struct scmi_mailbox *smbox = cinfo->transport_info;
 	int ret;
 
+	/*
+	 * The mailbox layer has its own queue. However the mailbox queue
+	 * confuses the per message SCMI timeouts since the clock starts when
+	 * the message is submitted into the mailbox queue. So when multiple
+	 * messages are queued up the clock starts on all messages instead of
+	 * only the one inflight.
+	 */
+	mutex_lock(&smbox->chan_lock);
+
 	ret = mbox_send_message(smbox->chan, xfer);
+	/* mbox_send_message returns non-negative value on success */
+	if (ret < 0) {
+		mutex_unlock(&smbox->chan_lock);
+		return ret;
+	}
 
-	/* mbox_send_message returns non-negative value on success, so reset */
-	if (ret > 0)
-		ret = 0;
-
-	return ret;
+	return 0;
 }
 
 static void mailbox_mark_txdone(struct scmi_chan_info *cinfo, int ret,
@@ -269,13 +282,10 @@ static void mailbox_mark_txdone(struct scmi_chan_info *cinfo, int ret,
 {
 	struct scmi_mailbox *smbox = cinfo->transport_info;
 
-	/*
-	 * NOTE: we might prefer not to need the mailbox ticker to manage the
-	 * transfer queueing since the protocol layer queues things by itself.
-	 * Unfortunately, we have to kick the mailbox framework after we have
-	 * received our message.
-	 */
 	mbox_client_txdone(smbox->chan, ret);
+
+	/* Release channel */
+	mutex_unlock(&smbox->chan_lock);
 }
 
 static void mailbox_fetch_response(struct scmi_chan_info *cinfo,
@@ -147,6 +147,7 @@ static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif,
 					   struct acpi_buffer *params)
 {
 	acpi_status status;
+	union acpi_object *obj;
 	union acpi_object atif_arg_elements[2];
 	struct acpi_object_list atif_arg;
 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -169,16 +170,24 @@ static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif,
 
 	status = acpi_evaluate_object(atif->handle, NULL, &atif_arg,
 				      &buffer);
+	obj = (union acpi_object *)buffer.pointer;
 
-	/* Fail only if calling the method fails and ATIF is supported */
+	/* Fail if calling the method fails and ATIF is supported */
 	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
 		DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n",
 				 acpi_format_exception(status));
-		kfree(buffer.pointer);
+		kfree(obj);
 		return NULL;
 	}
 
-	return buffer.pointer;
+	if (obj->type != ACPI_TYPE_BUFFER) {
+		DRM_DEBUG_DRIVER("bad object returned from ATIF: %d\n",
+				 obj->type);
+		kfree(obj);
+		return NULL;
+	}
+
+	return obj;
 }
 
 /**
@@ -1053,8 +1053,10 @@ int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
 
 	r = amdgpu_ring_init(adev, ring, 1024, NULL, 0,
 			     AMDGPU_RING_PRIO_DEFAULT, NULL);
-	if (r)
+	if (r) {
+		amdgpu_mes_unlock(&adev->mes);
 		goto clean_up_memory;
+	}
 
 	amdgpu_mes_ring_to_queue_props(adev, ring, &qprops);
@@ -1087,7 +1089,6 @@ clean_up_ring:
 	amdgpu_ring_fini(ring);
 clean_up_memory:
 	kfree(ring);
-	amdgpu_mes_unlock(&adev->mes);
 	return r;
 }
 
@@ -841,6 +841,8 @@ bool is_psr_su_specific_panel(struct dc_link *link)
 			isPSRSUSupported = false;
 		else if (dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x03)
 			isPSRSUSupported = false;
+		else if (dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x01)
+			isPSRSUSupported = false;
 		else if (dpcd_caps->psr_info.force_psrsu_cap == 0x1)
 			isPSRSUSupported = true;
 	}
@@ -722,12 +722,13 @@ void dpu_crtc_complete_commit(struct drm_crtc *crtc)
 	_dpu_crtc_complete_flip(crtc);
 }
 
-static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
+static int _dpu_crtc_check_and_setup_lm_bounds(struct drm_crtc *crtc,
 		struct drm_crtc_state *state)
 {
 	struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
 	struct drm_display_mode *adj_mode = &state->adjusted_mode;
 	u32 crtc_split_width = adj_mode->hdisplay / cstate->num_mixers;
+	struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
 	int i;
 
 	for (i = 0; i < cstate->num_mixers; i++) {
@@ -738,7 +739,12 @@ static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
 		r->y2 = adj_mode->vdisplay;
 
 		trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
+
+		if (drm_rect_width(r) > dpu_kms->catalog->caps->max_mixer_width)
+			return -E2BIG;
 	}
+
+	return 0;
 }
 
 static void _dpu_crtc_get_pcc_coeff(struct drm_crtc_state *state,
@@ -814,7 +820,7 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
 
 	DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);
 
-	_dpu_crtc_setup_lm_bounds(crtc, crtc->state);
+	_dpu_crtc_check_and_setup_lm_bounds(crtc, crtc->state);
 
 	/* encoder will trigger pending mask now */
 	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
@@ -1208,8 +1214,11 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
 	if (crtc_state->active_changed)
 		crtc_state->mode_changed = true;
 
-	if (cstate->num_mixers)
-		_dpu_crtc_setup_lm_bounds(crtc, crtc_state);
+	if (cstate->num_mixers) {
+		rc = _dpu_crtc_check_and_setup_lm_bounds(crtc, crtc_state);
+		if (rc)
+			return rc;
+	}
 
 	/* FIXME: move this to dpu_plane_atomic_check? */
 	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
@@ -1122,21 +1122,20 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
 	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
 		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
 
-		if (!dpu_enc->hw_pp[i]) {
+		phys->hw_pp = dpu_enc->hw_pp[i];
+		if (!phys->hw_pp) {
 			DPU_ERROR_ENC(dpu_enc,
 				"no pp block assigned at idx: %d\n", i);
 			return;
 		}
 
-		if (!hw_ctl[i]) {
+		phys->hw_ctl = i < num_ctl ? to_dpu_hw_ctl(hw_ctl[i]) : NULL;
+		if (!phys->hw_ctl) {
 			DPU_ERROR_ENC(dpu_enc,
 				"no ctl block assigned at idx: %d\n", i);
 			return;
 		}
 
-		phys->hw_pp = dpu_enc->hw_pp[i];
-		phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[i]);
-
 		phys->cached_mode = crtc_state->adjusted_mode;
 		if (phys->ops.atomic_mode_set)
 			phys->ops.atomic_mode_set(phys, crtc_state, conn_state);
@@ -280,7 +280,7 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
 	intf_cfg.stream_sel = 0; /* Don't care value for video mode */
 	intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
 	intf_cfg.dsc = dpu_encoder_helper_get_dsc(phys_enc);
-	if (phys_enc->hw_pp->merge_3d)
+	if (intf_cfg.mode_3d && phys_enc->hw_pp->merge_3d)
 		intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx;
 
 	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
@@ -26,7 +26,7 @@ static void msm_disp_state_dump_regs(u32 **reg, u32 aligned_len, void __iomem *b
 	end_addr = base_addr + aligned_len;
 
 	if (!(*reg))
-		*reg = kzalloc(len_padded, GFP_KERNEL);
+		*reg = kvzalloc(len_padded, GFP_KERNEL);
 
 	if (*reg)
 		dump_addr = *reg;
@@ -48,20 +48,21 @@ static void msm_disp_state_dump_regs(u32 **reg, u32 aligned_len, void __iomem *b
 	}
 }
 
-static void msm_disp_state_print_regs(u32 **reg, u32 len, void __iomem *base_addr,
-		struct drm_printer *p)
+static void msm_disp_state_print_regs(const u32 *dump_addr, u32 len,
+		void __iomem *base_addr, struct drm_printer *p)
 {
 	int i;
-	u32 *dump_addr = NULL;
 	void __iomem *addr;
 	u32 num_rows;
 
+	if (!dump_addr) {
+		drm_printf(p, "Registers not stored\n");
+		return;
+	}
+
 	addr = base_addr;
 	num_rows = len / REG_DUMP_ALIGN;
 
-	if (*reg)
-		dump_addr = *reg;
-
 	for (i = 0; i < num_rows; i++) {
 		drm_printf(p, "0x%lx : %08x %08x %08x %08x\n",
 				(unsigned long)(addr - base_addr),
@@ -89,7 +90,7 @@ void msm_disp_state_print(struct msm_disp_state *state, struct drm_printer *p)
 
 	list_for_each_entry_safe(block, tmp, &state->blocks, node) {
 		drm_printf(p, "====================%s================\n", block->name);
-		msm_disp_state_print_regs(&block->state, block->size, block->base_addr, p);
+		msm_disp_state_print_regs(block->state, block->size, block->base_addr, p);
 	}
 
 	drm_printf(p, "===================dpu drm state================\n");
@@ -161,7 +162,7 @@ void msm_disp_state_free(void *data)
 
 	list_for_each_entry_safe(block, tmp, &disp_state->blocks, node) {
 		list_del(&block->node);
-		kfree(block->state);
+		kvfree(block->state);
 		kfree(block);
 	}
 
@@ -537,7 +537,7 @@ static unsigned long dsi_adjust_pclk_for_compression(const struct drm_display_mo
 
 	int new_htotal = mode->htotal - mode->hdisplay + new_hdisplay;
 
-	return new_htotal * mode->vtotal * drm_mode_vrefresh(mode);
+	return mult_frac(mode->clock * 1000u, new_htotal, mode->htotal);
 }
 
 static unsigned long dsi_get_pclk_rate(const struct drm_display_mode *mode,
@@ -545,7 +545,7 @@ static unsigned long dsi_get_pclk_rate(const struct drm_display_mode *mode,
 {
 	unsigned long pclk_rate;
 
-	pclk_rate = mode->clock * 1000;
+	pclk_rate = mode->clock * 1000u;
 
 	if (dsc)
 		pclk_rate = dsi_adjust_pclk_for_compression(mode, dsc);
@@ -139,7 +139,15 @@ int hgsmi_update_pointer_shape(struct gen_pool *ctx, u32 flags,
 		flags |= VBOX_MOUSE_POINTER_VISIBLE;
 	}
 
-	p = hgsmi_buffer_alloc(ctx, sizeof(*p) + pixel_len, HGSMI_CH_VBVA,
+	/*
+	 * The 4 extra bytes come from switching struct vbva_mouse_pointer_shape
+	 * from having a 4 bytes fixed array at the end to using a proper VLA
+	 * at the end. These 4 extra bytes were not subtracted from sizeof(*p)
+	 * before the switch to the VLA, so this way the behavior is unchanged.
+	 * Chances are these 4 extra bytes are not necessary but they are kept
+	 * to avoid regressions.
+	 */
+	p = hgsmi_buffer_alloc(ctx, sizeof(*p) + pixel_len + 4, HGSMI_CH_VBVA,
 			       VBVA_MOUSE_POINTER_SHAPE);
 	if (!p)
 		return -ENOMEM;
@@ -351,10 +351,8 @@ struct vbva_mouse_pointer_shape {
 	 * Bytes in the gap between the AND and the XOR mask are undefined.
 	 * XOR mask scanlines have no gap between them and size of XOR mask is:
 	 * xor_len = width * 4 * height.
-	 *
-	 * Preallocate 4 bytes for accessing actual data as p->data.
 	 */
-	u8 data[4];
+	u8 data[];
 } __packed;
 
 /* pointer is visible */
@@ -881,6 +881,10 @@ static int vmw_stdu_connector_atomic_check(struct drm_connector *conn,
 	struct drm_crtc_state *new_crtc_state;
 
 	conn_state = drm_atomic_get_connector_state(state, conn);
+
+	if (IS_ERR(conn_state))
+		return PTR_ERR(conn_state);
+
 	du = vmw_connector_to_stdu(conn);
 
 	if (!conn_state->crtc)
@@ -1219,7 +1219,8 @@ static int bma400_activity_event_en(struct bma400_data *data,
 static int bma400_tap_event_en(struct bma400_data *data,
 			       enum iio_event_direction dir, int state)
 {
-	unsigned int mask, field_value;
+	unsigned int mask;
+	unsigned int field_value = 0;
 	int ret;
 
 	/*
@@ -1335,6 +1335,8 @@ config TI_LMP92064
 	tristate "Texas Instruments LMP92064 ADC driver"
 	depends on SPI
 	select REGMAP_SPI
+	select IIO_BUFFER
+	select IIO_TRIGGERED_BUFFER
 	help
 	  Say yes here to build support for the LMP92064 Precision Current and Voltage
 	  sensor.
@@ -82,25 +82,26 @@ config ADMV1014
 	  module will be called admv1014.
 
 config ADMV4420
-       tristate "Analog Devices ADMV4420 K Band Downconverter"
-       depends on SPI
-       help
-         Say yes here to build support for Analog Devices K Band
-         Downconverter with integrated Fractional-N PLL and VCO.
+	tristate "Analog Devices ADMV4420 K Band Downconverter"
+	depends on SPI
+	select REGMAP_SPI
+	help
+	  Say yes here to build support for Analog Devices K Band
+	  Downconverter with integrated Fractional-N PLL and VCO.
 
-         To compile this driver as a module, choose M here: the
-         module will be called admv4420.
+	  To compile this driver as a module, choose M here: the
+	  module will be called admv4420.
 
 config ADRF6780
-       tristate "Analog Devices ADRF6780 Microwave Upconverter"
-       depends on SPI
-       depends on COMMON_CLK
-       help
-         Say yes here to build support for Analog Devices ADRF6780
-         5.9 GHz to 23.6 GHz, Wideband, Microwave Upconverter.
+	tristate "Analog Devices ADRF6780 Microwave Upconverter"
+	depends on SPI
+	depends on COMMON_CLK
+	help
+	  Say yes here to build support for Analog Devices ADRF6780
+	  5.9 GHz to 23.6 GHz, Wideband, Microwave Upconverter.
 
-         To compile this driver as a module, choose M here: the
-         module will be called adrf6780.
+	  To compile this driver as a module, choose M here: the
+	  module will be called adrf6780.
 
 endmenu
 endmenu
@@ -269,6 +269,8 @@ rdma_find_ndev_for_src_ip_rcu(struct net *net, const struct sockaddr *src_in)
 		break;
 #endif
 	}
+	if (!ret && dev && is_vlan_dev(dev))
+		dev = vlan_dev_real_dev(dev);
 	return ret ? ERR_PTR(ret) : dev;
 }
 
|
@ -366,12 +366,12 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
|
|||
goto done;
|
||||
}
|
||||
}
|
||||
if (rdev->pacing.dbr_pacing)
|
||||
if (rdev->pacing.dbr_pacing && bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
|
||||
bnxt_re_copy_db_pacing_stats(rdev, stats);
|
||||
}
|
||||
|
||||
done:
|
||||
return bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
|
||||
return bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx) ?
|
||||
BNXT_RE_NUM_EXT_COUNTERS : BNXT_RE_NUM_STD_COUNTERS;
|
||||
}
|
||||
|
||||
|
@@ -381,7 +381,7 @@ struct rdma_hw_stats *bnxt_re_ib_alloc_hw_port_stats(struct ib_device *ibdev,
 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
 	int num_counters = 0;
 
-	if (bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))
+	if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
 		num_counters = BNXT_RE_NUM_EXT_COUNTERS;
 	else
 		num_counters = BNXT_RE_NUM_STD_COUNTERS;
@@ -400,6 +400,10 @@ static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
 	struct bnxt_re_fence_data *fence = &pd->fence;
 	struct ib_mr *ib_mr = &fence->mr->ib_mr;
 	struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;
+	struct bnxt_re_dev *rdev = pd->rdev;
+
+	if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
+		return;
 
 	memset(wqe, 0, sizeof(*wqe));
 	wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
@@ -454,6 +458,9 @@ static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
 	struct device *dev = &rdev->en_dev->pdev->dev;
 	struct bnxt_re_mr *mr = fence->mr;
 
+	if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
+		return;
+
 	if (fence->mw) {
 		bnxt_re_dealloc_mw(fence->mw);
 		fence->mw = NULL;
@@ -485,6 +492,9 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
 	struct ib_mw *mw;
 	int rc;
 
+	if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
+		return 0;
+
 	dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
 				  DMA_BIDIRECTIONAL);
 	rc = dma_mapping_error(dev, dma_addr);
@@ -1023,7 +1033,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
 	bytes = (qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size);
 	/* Consider mapping PSN search memory only for RC QPs. */
 	if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC) {
-		psn_sz = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
+		psn_sz = bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx) ?
						   sizeof(struct sq_psn_search_ext) :
						   sizeof(struct sq_psn_search);
 		psn_nume = (qplib_qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
@@ -1234,7 +1244,7 @@ static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp)
 	qplqp = &qp->qplib_qp;
 	dev_attr = &rdev->dev_attr;
 
-	if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
+	if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
 		qplqp->rq.max_sge = dev_attr->max_qp_sges;
 		if (qplqp->rq.max_sge > dev_attr->max_qp_sges)
 			qplqp->rq.max_sge = dev_attr->max_qp_sges;
@@ -1301,7 +1311,7 @@ static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp,
 	qplqp = &qp->qplib_qp;
 	dev_attr = &rdev->dev_attr;
 
-	if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
+	if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
 		entries = bnxt_re_init_depth(init_attr->cap.max_send_wr + 1, uctx);
 		qplqp->sq.max_wqe = min_t(u32, entries,
 					  dev_attr->max_qp_wqes + 1);
@@ -1328,7 +1338,7 @@ static int bnxt_re_init_qp_type(struct bnxt_re_dev *rdev,
 		goto out;
 	}
 
-	if (bnxt_qplib_is_chip_gen_p5(chip_ctx) &&
+	if (bnxt_qplib_is_chip_gen_p5_p7(chip_ctx) &&
 	    init_attr->qp_type == IB_QPT_GSI)
 		qptype = CMDQ_CREATE_QP_TYPE_GSI;
 out:
@@ -1527,7 +1537,7 @@ int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr,
 		goto fail;
 
 	if (qp_init_attr->qp_type == IB_QPT_GSI &&
-	    !(bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))) {
+	    !(bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))) {
 		rc = bnxt_re_create_gsi_qp(qp, pd, qp_init_attr);
 		if (rc == -ENODEV)
 			goto qp_destroy;
@@ -2553,11 +2563,6 @@ static int bnxt_re_build_inv_wqe(const struct ib_send_wr *wr,
 	wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
 	wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
 
-	/* Need unconditional fence for local invalidate
-	 * opcode to work as expected.
-	 */
-	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
-
 	if (wr->send_flags & IB_SEND_SIGNALED)
 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
 	if (wr->send_flags & IB_SEND_SOLICITED)
@@ -2580,12 +2585,6 @@ static int bnxt_re_build_reg_wqe(const struct ib_reg_wr *wr,
 	wqe->frmr.levels = qplib_frpl->hwq.level;
 	wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
 
-	/* Need unconditional fence for reg_mr
-	 * opcode to function as expected.
-	 */
-
-	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
-
 	if (wr->wr.send_flags & IB_SEND_SIGNALED)
 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
 
@@ -2716,6 +2715,18 @@ bad:
 	return rc;
 }
 
+static void bnxt_re_legacy_set_uc_fence(struct bnxt_qplib_swqe *wqe)
+{
+	/* Need unconditional fence for non-wire memory opcode
+	 * to work as expected.
+	 */
+	if (wqe->type == BNXT_QPLIB_SWQE_TYPE_LOCAL_INV ||
+	    wqe->type == BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR ||
+	    wqe->type == BNXT_QPLIB_SWQE_TYPE_REG_MR ||
+	    wqe->type == BNXT_QPLIB_SWQE_TYPE_BIND_MW)
+		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
+}
+
 int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
 		      const struct ib_send_wr **bad_wr)
 {
@@ -2795,8 +2806,11 @@ int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
 			rc = -EINVAL;
 			goto bad;
 		}
-		if (!rc)
+		if (!rc) {
+			if (!bnxt_qplib_is_chip_gen_p5_p7(qp->rdev->chip_ctx))
+				bnxt_re_legacy_set_uc_fence(&wqe);
 			rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
+		}
bad:
 		if (rc) {
 			ibdev_err(&qp->rdev->ibdev,
@@ -107,9 +107,14 @@ static void bnxt_re_set_db_offset(struct bnxt_re_dev *rdev)
 		dev_info(rdev_to_dev(rdev),
 			 "Couldn't get DB bar size, Low latency framework is disabled\n");
 	/* set register offsets for both UC and WC */
-	res->dpi_tbl.ucreg.offset = res->is_vf ? BNXT_QPLIB_DBR_VF_DB_OFFSET :
-						 BNXT_QPLIB_DBR_PF_DB_OFFSET;
-	res->dpi_tbl.wcreg.offset = res->dpi_tbl.ucreg.offset;
+	if (bnxt_qplib_is_chip_gen_p7(cctx)) {
+		res->dpi_tbl.ucreg.offset = offset;
+		res->dpi_tbl.wcreg.offset = en_dev->l2_db_size;
+	} else {
+		res->dpi_tbl.ucreg.offset = res->is_vf ? BNXT_QPLIB_DBR_VF_DB_OFFSET :
+							 BNXT_QPLIB_DBR_PF_DB_OFFSET;
+		res->dpi_tbl.wcreg.offset = res->dpi_tbl.ucreg.offset;
+	}
 
 	/* If WC mapping is disabled by L2 driver then en_dev->l2_db_size
 	 * is equal to the DB-Bar actual size. This indicates that L2
|
|||
struct bnxt_qplib_chip_ctx *cctx;
|
||||
|
||||
cctx = rdev->chip_ctx;
|
||||
cctx->modes.wqe_mode = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
|
||||
cctx->modes.wqe_mode = bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx) ?
|
||||
mode : BNXT_QPLIB_WQE_MODE_STATIC;
|
||||
if (bnxt_re_hwrm_qcaps(rdev))
|
||||
dev_err(rdev_to_dev(rdev),
|
||||
|
@@ -176,8 +181,11 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode)
 
 	bnxt_re_set_db_offset(rdev);
 	rc = bnxt_qplib_map_db_bar(&rdev->qplib_res);
-	if (rc)
+	if (rc) {
+		kfree(rdev->chip_ctx);
+		rdev->chip_ctx = NULL;
 		return rc;
+	}
 
 	if (bnxt_qplib_determine_atomics(en_dev->pdev))
 		ibdev_info(&rdev->ibdev,
@@ -215,7 +223,7 @@ static void bnxt_re_limit_pf_res(struct bnxt_re_dev *rdev)
 	ctx->srqc_count = min_t(u32, BNXT_RE_MAX_SRQC_COUNT,
 				attr->max_srq);
 	ctx->cq_count = min_t(u32, BNXT_RE_MAX_CQ_COUNT, attr->max_cq);
-	if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))
+	if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
 		for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
 			rdev->qplib_ctx.tqm_ctx.qcount[i] =
 				rdev->dev_attr.tqm_alloc_reqs[i];
@@ -264,7 +272,7 @@ static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
 	memset(&rdev->qplib_ctx.vf_res, 0, sizeof(struct bnxt_qplib_vf_res));
 	bnxt_re_limit_pf_res(rdev);
 
-	num_vfs = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
+	num_vfs = bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx) ?
 			BNXT_RE_GEN_P5_MAX_VF : rdev->num_vfs;
 	if (num_vfs)
 		bnxt_re_limit_vf_res(&rdev->qplib_ctx, num_vfs);
@ -276,7 +284,7 @@ static void bnxt_re_vf_res_config(struct bnxt_re_dev *rdev)
|
|||
if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
|
||||
return;
|
||||
rdev->num_vfs = pci_sriov_get_totalvfs(rdev->en_dev->pdev);
|
||||
if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
|
||||
if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
|
||||
bnxt_re_set_resource_limits(rdev);
|
||||
bnxt_qplib_set_func_resources(&rdev->qplib_res, &rdev->rcfw,
|
||||
&rdev->qplib_ctx);
|
||||
|
@ -1067,16 +1075,6 @@ static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq,
|
|||
return 0;
|
||||
}
|
||||
|
||||
#define BNXT_RE_GEN_P5_PF_NQ_DB 0x10000
|
||||
#define BNXT_RE_GEN_P5_VF_NQ_DB 0x4000
|
||||
static u32 bnxt_re_get_nqdb_offset(struct bnxt_re_dev *rdev, u16 indx)
|
||||
{
|
||||
return bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
|
||||
(rdev->is_virtfn ? BNXT_RE_GEN_P5_VF_NQ_DB :
|
||||
BNXT_RE_GEN_P5_PF_NQ_DB) :
|
||||
rdev->en_dev->msix_entries[indx].db_offset;
|
||||
}
|
||||
|
||||
static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev)
|
||||
{
|
||||
int i;
|
||||
|
@ -1097,7 +1095,7 @@ static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
|
|||
bnxt_qplib_init_res(&rdev->qplib_res);
|
||||
|
||||
for (i = 1; i < rdev->num_msix ; i++) {
|
||||
db_offt = bnxt_re_get_nqdb_offset(rdev, i);
|
||||
db_offt = rdev->en_dev->msix_entries[i].db_offset;
|
||||
rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nq[i - 1],
|
||||
i - 1, rdev->en_dev->msix_entries[i].vector,
|
||||
db_offt, &bnxt_re_cqn_handler,
|
||||
|
@ -1508,7 +1506,7 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
|
|||
ibdev_err(&rdev->ibdev, "Failed to allocate CREQ: %#x\n", rc);
|
||||
goto free_rcfw;
|
||||
}
|
||||
db_offt = bnxt_re_get_nqdb_offset(rdev, BNXT_RE_AEQ_IDX);
|
||||
db_offt = rdev->en_dev->msix_entries[BNXT_RE_AEQ_IDX].db_offset;
|
||||
vid = rdev->en_dev->msix_entries[BNXT_RE_AEQ_IDX].vector;
|
||||
rc = bnxt_qplib_enable_rcfw_channel(&rdev->rcfw,
|
||||
vid, db_offt,
|
||||
|
@ -1536,7 +1534,7 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
|
|||
bnxt_re_set_resource_limits(rdev);
|
||||
|
||||
rc = bnxt_qplib_alloc_ctx(&rdev->qplib_res, &rdev->qplib_ctx, 0,
|
||||
bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx));
|
||||
bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx));
|
||||
if (rc) {
|
||||
ibdev_err(&rdev->ibdev,
|
||||
"Failed to allocate QPLIB context: %#x\n", rc);
|
||||
|
@ -1659,7 +1657,7 @@ static void bnxt_re_setup_cc(struct bnxt_re_dev *rdev, bool enable)
|
|||
return;
|
||||
|
||||
/* Currently enabling only for GenP5 adapters */
|
||||
if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))
|
||||
if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
|
||||
return;
|
||||
|
||||
if (enable) {
|
||||
|
|
|
@@ -995,7 +995,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 
 	/* SQ */
 	if (qp->type == CMDQ_CREATE_QP_TYPE_RC) {
-		psn_sz = bnxt_qplib_is_chip_gen_p5(res->cctx) ?
+		psn_sz = bnxt_qplib_is_chip_gen_p5_p7(res->cctx) ?
 			 sizeof(struct sq_psn_search_ext) :
 			 sizeof(struct sq_psn_search);
 
@@ -1649,7 +1649,7 @@ static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
 	flg_npsn = ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
 		    SQ_PSN_SEARCH_NEXT_PSN_MASK);
 
-	if (bnxt_qplib_is_chip_gen_p5(qp->cctx)) {
+	if (bnxt_qplib_is_chip_gen_p5_p7(qp->cctx)) {
 		psns_ext->opcode_start_psn = cpu_to_le32(op_spsn);
 		psns_ext->flags_next_psn = cpu_to_le32(flg_npsn);
 		psns_ext->start_slot_idx = cpu_to_le16(swq->slot_idx);

@@ -169,7 +169,7 @@ struct bnxt_qplib_swqe {
 		};
 		u32 q_key;
 		u32 dst_qp;
-		u16 avid;
+		u32 avid;
 	} send;
 
 	/* Send Raw Ethernet and QP1 */

@@ -525,7 +525,7 @@ static int __bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
 		/* failed with status */
 		dev_err(&rcfw->pdev->dev, "cmdq[%#x]=%#x status %#x\n",
 			cookie, opcode, evnt->status);
-		rc = -EFAULT;
+		rc = -EIO;
 	}
 
 	return rc;
@@ -852,7 +852,7 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
 	 */
 	if (is_virtfn)
 		goto skip_ctx_setup;
-	if (bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx))
+	if (bnxt_qplib_is_chip_gen_p5_p7(rcfw->res->cctx))
 		goto config_vf_res;
 
 	lvl = ctx->qpc_tbl.level;

@@ -244,6 +244,8 @@ int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
 			sginfo.pgsize = npde * pg_size;
 			sginfo.npages = 1;
 			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo);
+			if (rc)
+				goto fail;
 
 			/* Alloc PBL pages */
 			sginfo.npages = npbl;
@@ -255,22 +257,9 @@ int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
 			dst_virt_ptr =
 				(dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
 			src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
-			if (hwq_attr->type == HWQ_TYPE_MR) {
-			/* For MR it is expected that we supply only 1 contigous
-			 * page i.e only 1 entry in the PDL that will contain
-			 * all the PBLs for the user supplied memory region
-			 */
-				for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count;
-				     i++)
-					dst_virt_ptr[0][i] = src_phys_ptr[i] |
-						flag;
-			} else {
-				for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count;
-				     i++)
-					dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
-						src_phys_ptr[i] |
-						PTU_PDE_VALID;
-			}
+			for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++)
+				dst_virt_ptr[0][i] = src_phys_ptr[i] | flag;
 
 			/* Alloc or init PTEs */
 			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_2],
 					 hwq_attr->sginfo);
@@ -805,7 +794,7 @@ static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res,
 	dpit = &res->dpi_tbl;
 	reg = &dpit->wcreg;
 
-	if (!bnxt_qplib_is_chip_gen_p5(res->cctx)) {
+	if (!bnxt_qplib_is_chip_gen_p5_p7(res->cctx)) {
 		/* Offest should come from L2 driver */
 		dbr_offset = dev_attr->l2_db_size;
 		dpit->ucreg.offset = dbr_offset;

@@ -44,6 +44,9 @@ extern const struct bnxt_qplib_gid bnxt_qplib_gid_zero;
 #define CHIP_NUM_57508		0x1750
 #define CHIP_NUM_57504		0x1751
 #define CHIP_NUM_57502		0x1752
+#define CHIP_NUM_58818          0xd818
+#define CHIP_NUM_57608          0x1760
+
 
 struct bnxt_qplib_drv_modes {
 	u8	wqe_mode;
@@ -296,6 +299,12 @@ struct bnxt_qplib_res {
 	struct bnxt_qplib_db_pacing_data *pacing_data;
 };
 
+static inline bool bnxt_qplib_is_chip_gen_p7(struct bnxt_qplib_chip_ctx *cctx)
+{
+	return (cctx->chip_num == CHIP_NUM_58818 ||
+		cctx->chip_num == CHIP_NUM_57608);
+}
+
 static inline bool bnxt_qplib_is_chip_gen_p5(struct bnxt_qplib_chip_ctx *cctx)
 {
 	return (cctx->chip_num == CHIP_NUM_57508 ||
@@ -303,15 +312,20 @@ static inline bool bnxt_qplib_is_chip_gen_p5(struct bnxt_qplib_chip_ctx *cctx)
 		cctx->chip_num == CHIP_NUM_57502);
 }
 
+static inline bool bnxt_qplib_is_chip_gen_p5_p7(struct bnxt_qplib_chip_ctx *cctx)
+{
+	return bnxt_qplib_is_chip_gen_p5(cctx) || bnxt_qplib_is_chip_gen_p7(cctx);
+}
+
 static inline u8 bnxt_qplib_get_hwq_type(struct bnxt_qplib_res *res)
 {
-	return bnxt_qplib_is_chip_gen_p5(res->cctx) ?
+	return bnxt_qplib_is_chip_gen_p5_p7(res->cctx) ?
 	       HWQ_TYPE_QUEUE : HWQ_TYPE_L2_CMPL;
 }
 
 static inline u8 bnxt_qplib_get_ring_type(struct bnxt_qplib_chip_ctx *cctx)
 {
-	return bnxt_qplib_is_chip_gen_p5(cctx) ?
+	return bnxt_qplib_is_chip_gen_p5_p7(cctx) ?
 	       RING_ALLOC_REQ_RING_TYPE_NQ :
 	       RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL;
 }
@@ -488,7 +502,7 @@ static inline void bnxt_qplib_ring_nq_db(struct bnxt_qplib_db_info *info,
 	u32 type;
 
 	type = arm ? DBC_DBC_TYPE_NQ_ARM : DBC_DBC_TYPE_NQ;
-	if (bnxt_qplib_is_chip_gen_p5(cctx))
+	if (bnxt_qplib_is_chip_gen_p5_p7(cctx))
 		bnxt_qplib_ring_db(info, type);
 	else
 		bnxt_qplib_ring_db32(info, arm);

@@ -59,7 +59,7 @@ static bool bnxt_qplib_is_atomic_cap(struct bnxt_qplib_rcfw *rcfw)
 {
 	u16 pcie_ctl2 = 0;
 
-	if (!bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx))
+	if (!bnxt_qplib_is_chip_gen_p5_p7(rcfw->res->cctx))
 		return false;
 
 	pcie_capability_read_word(rcfw->pdev, PCI_EXP_DEVCTL2, &pcie_ctl2);
@@ -133,10 +133,12 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
 	 * reporting the max number
 	 */
 	attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS + 1;
-	attr->max_qp_sges = bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx) ?
+	attr->max_qp_sges = bnxt_qplib_is_chip_gen_p5_p7(rcfw->res->cctx) ?
 			    6 : sb->max_sge;
 	attr->max_cq = le32_to_cpu(sb->max_cq);
 	attr->max_cq_wqes = le32_to_cpu(sb->max_cqe);
+	if (!bnxt_qplib_is_chip_gen_p7(rcfw->res->cctx))
+		attr->max_cq_wqes = min_t(u32, BNXT_QPLIB_MAX_CQ_WQES, attr->max_cq_wqes);
 	attr->max_cq_sges = attr->max_qp_sges;
 	attr->max_mr = le32_to_cpu(sb->max_mr);
 	attr->max_mw = le32_to_cpu(sb->max_mw);
@@ -151,9 +153,17 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
 	attr->max_srq_sges = sb->max_srq_sge;
 	attr->max_pkey = 1;
 	attr->max_inline_data = le32_to_cpu(sb->max_inline_data);
-	attr->l2_db_size = (sb->l2_db_space_size + 1) *
-			    (0x01 << RCFW_DBR_BASE_PAGE_SHIFT);
-	attr->max_sgid = BNXT_QPLIB_NUM_GIDS_SUPPORTED;
+	if (!bnxt_qplib_is_chip_gen_p7(rcfw->res->cctx))
+		attr->l2_db_size = (sb->l2_db_space_size + 1) *
+				    (0x01 << RCFW_DBR_BASE_PAGE_SHIFT);
+	/*
+	 * Read the max gid supported by HW.
+	 * For each entry in HW  GID in HW table, we consume 2
+	 * GID entries in the kernel GID table. So max_gid reported
+	 * to stack can be up to twice the value reported by the HW, up to 256 gids.
+	 */
+	attr->max_sgid = le32_to_cpu(sb->max_gid);
+	attr->max_sgid = min_t(u32, BNXT_QPLIB_NUM_GIDS_SUPPORTED, 2 * attr->max_sgid);
 	attr->dev_cap_flags = le16_to_cpu(sb->dev_cap_flags);
 
 	bnxt_qplib_query_version(rcfw, attr->fw_ver);
@@ -934,7 +944,7 @@ int bnxt_qplib_modify_cc(struct bnxt_qplib_res *res,
 	req->inactivity_th = cpu_to_le16(cc_param->inact_th);
 
 	/* For chip gen P5 onwards fill extended cmd and header */
-	if (bnxt_qplib_is_chip_gen_p5(res->cctx)) {
+	if (bnxt_qplib_is_chip_gen_p5_p7(res->cctx)) {
 		struct roce_tlv *hdr;
 		u32 payload;
 		u32 chunks;

@@ -55,6 +55,7 @@ struct bnxt_qplib_dev_attr {
 	u32				max_qp_wqes;
 	u32				max_qp_sges;
 	u32				max_cq;
+#define BNXT_QPLIB_MAX_CQ_WQES		0xfffff
 	u32				max_cq_wqes;
 	u32				max_cq_sges;
 	u32				max_mr;

@@ -2086,7 +2086,7 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
 	err = -ENOMEM;
 	if (n->dev->flags & IFF_LOOPBACK) {
 		if (iptype == 4)
-			pdev = ip_dev_find(&init_net, *(__be32 *)peer_ip);
+			pdev = __ip_dev_find(&init_net, *(__be32 *)peer_ip, false);
 		else if (IS_ENABLED(CONFIG_IPV6))
 			for_each_netdev(&init_net, pdev) {
 				if (ipv6_chk_addr(&init_net,
@@ -2101,12 +2101,12 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
 			err = -ENODEV;
 			goto out;
 		}
+		if (is_vlan_dev(pdev))
+			pdev = vlan_dev_real_dev(pdev);
 		ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
 					n, pdev, rt_tos2priority(tos));
-		if (!ep->l2t) {
-			dev_put(pdev);
+		if (!ep->l2t)
 			goto out;
-		}
 		ep->mtu = pdev->mtu;
 		ep->tx_chan = cxgb4_port_chan(pdev);
 		ep->smac_idx = ((struct port_info *)netdev_priv(pdev))->smt_idx;
@@ -2119,7 +2119,6 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
 		ep->rss_qid = cdev->rdev.lldi.rxq_ids[
 			cxgb4_port_idx(pdev) * step];
 		set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));
-		dev_put(pdev);
 	} else {
 		pdev = get_real_dev(n->dev);
 		ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,

@@ -3630,7 +3630,7 @@ void irdma_free_lsmm_rsrc(struct irdma_qp *iwqp)
 /**
  * irdma_accept - registered call for connection to be accepted
  * @cm_id: cm information for passive connection
- * @conn_param: accpet parameters
+ * @conn_param: accept parameters
  */
 int irdma_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 {

@@ -68,6 +68,8 @@ MODULE_LICENSE("Dual BSD/GPL");
 static u64 srpt_service_guid;
 static DEFINE_SPINLOCK(srpt_dev_lock);	/* Protects srpt_dev_list. */
 static LIST_HEAD(srpt_dev_list);	/* List of srpt_device structures. */
+static DEFINE_MUTEX(srpt_mc_mutex);	/* Protects srpt_memory_caches. */
+static DEFINE_XARRAY(srpt_memory_caches); /* See also srpt_memory_cache_entry */
 
 static unsigned srp_max_req_size = DEFAULT_MAX_REQ_SIZE;
 module_param(srp_max_req_size, int, 0444);
@@ -105,6 +107,63 @@ static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc);
 static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc);
 static void srpt_process_wait_list(struct srpt_rdma_ch *ch);
 
+/* Type of the entries in srpt_memory_caches. */
+struct srpt_memory_cache_entry {
+	refcount_t ref;
+	struct kmem_cache *c;
+};
+
+static struct kmem_cache *srpt_cache_get(unsigned int object_size)
+{
+	struct srpt_memory_cache_entry *e;
+	char name[32];
+	void *res;
+
+	guard(mutex)(&srpt_mc_mutex);
+	e = xa_load(&srpt_memory_caches, object_size);
+	if (e) {
+		refcount_inc(&e->ref);
+		return e->c;
+	}
+	snprintf(name, sizeof(name), "srpt-%u", object_size);
+	e = kmalloc(sizeof(*e), GFP_KERNEL);
+	if (!e)
+		return NULL;
+	refcount_set(&e->ref, 1);
+	e->c = kmem_cache_create(name, object_size, /*align=*/512, 0, NULL);
+	if (!e->c)
+		goto free_entry;
+	res = xa_store(&srpt_memory_caches, object_size, e, GFP_KERNEL);
+	if (xa_is_err(res))
+		goto destroy_cache;
+	return e->c;
+
+destroy_cache:
+	kmem_cache_destroy(e->c);
+
+free_entry:
+	kfree(e);
+	return NULL;
+}
+
+static void srpt_cache_put(struct kmem_cache *c)
+{
+	struct srpt_memory_cache_entry *e = NULL;
+	unsigned long object_size;
+
+	guard(mutex)(&srpt_mc_mutex);
+	xa_for_each(&srpt_memory_caches, object_size, e)
+		if (e->c == c)
+			break;
+	if (WARN_ON_ONCE(!e))
+		return;
+	if (!refcount_dec_and_test(&e->ref))
+		return;
+	WARN_ON_ONCE(xa_erase(&srpt_memory_caches, object_size) != e);
+	kmem_cache_destroy(e->c);
+	kfree(e);
+}
+
 /*
  * The only allowed channel state changes are those that change the channel
  * state into a state with a higher numerical value. Hence the new > prev test.
@@ -2119,13 +2178,13 @@ static void srpt_release_channel_work(struct work_struct *w)
 			     ch->sport->sdev, ch->rq_size,
 			     ch->rsp_buf_cache, DMA_TO_DEVICE);
 
-	kmem_cache_destroy(ch->rsp_buf_cache);
+	srpt_cache_put(ch->rsp_buf_cache);
 
 	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_recv_ring,
 			     sdev, ch->rq_size,
 			     ch->req_buf_cache, DMA_FROM_DEVICE);
 
-	kmem_cache_destroy(ch->req_buf_cache);
+	srpt_cache_put(ch->req_buf_cache);
 
 	kref_put(&ch->kref, srpt_free_ch);
 }
@@ -2245,8 +2304,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
 	INIT_LIST_HEAD(&ch->cmd_wait_list);
 	ch->max_rsp_size = ch->sport->port_attrib.srp_max_rsp_size;
 
-	ch->rsp_buf_cache = kmem_cache_create("srpt-rsp-buf", ch->max_rsp_size,
-					      512, 0, NULL);
+	ch->rsp_buf_cache = srpt_cache_get(ch->max_rsp_size);
 	if (!ch->rsp_buf_cache)
 		goto free_ch;
 
@@ -2280,8 +2338,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
 		alignment_offset = round_up(imm_data_offset, 512) -
 			imm_data_offset;
 		req_sz = alignment_offset + imm_data_offset + srp_max_req_size;
-		ch->req_buf_cache = kmem_cache_create("srpt-req-buf", req_sz,
-						      512, 0, NULL);
+		ch->req_buf_cache = srpt_cache_get(req_sz);
 		if (!ch->req_buf_cache)
 			goto free_rsp_ring;
 
@@ -2478,7 +2535,7 @@ free_recv_ring:
 			     ch->req_buf_cache, DMA_FROM_DEVICE);
 
 free_recv_cache:
-	kmem_cache_destroy(ch->req_buf_cache);
+	srpt_cache_put(ch->req_buf_cache);
 
 free_rsp_ring:
 	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
@@ -2486,7 +2543,7 @@ free_rsp_ring:
 			     ch->rsp_buf_cache, DMA_TO_DEVICE);
 
 free_rsp_cache:
-	kmem_cache_destroy(ch->rsp_buf_cache);
+	srpt_cache_put(ch->rsp_buf_cache);
 
 free_ch:
 	if (rdma_cm_id)
@@ -3055,7 +3112,7 @@ static void srpt_free_srq(struct srpt_device *sdev)
 	srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
 			     sdev->srq_size, sdev->req_buf_cache,
 			     DMA_FROM_DEVICE);
-	kmem_cache_destroy(sdev->req_buf_cache);
+	srpt_cache_put(sdev->req_buf_cache);
 	sdev->srq = NULL;
 }
 
@@ -3082,8 +3139,7 @@ static int srpt_alloc_srq(struct srpt_device *sdev)
 	pr_debug("create SRQ #wr= %d max_allow=%d dev= %s\n", sdev->srq_size,
 		 sdev->device->attrs.max_srq_wr, dev_name(&device->dev));
 
-	sdev->req_buf_cache = kmem_cache_create("srpt-srq-req-buf",
-						srp_max_req_size, 0, 0, NULL);
+	sdev->req_buf_cache = srpt_cache_get(srp_max_req_size);
 	if (!sdev->req_buf_cache)
 		goto free_srq;
 
@@ -3105,7 +3161,7 @@ static int srpt_alloc_srq(struct srpt_device *sdev)
 	return 0;
 
 free_cache:
-	kmem_cache_destroy(sdev->req_buf_cache);
+	srpt_cache_put(sdev->req_buf_cache);
 
 free_srq:
 	ib_destroy_srq(srq);

@@ -8,6 +8,7 @@
  */
 
 #include <linux/bitfield.h>
+#include <linux/cleanup.h>
 #include <linux/clk.h>
 #include <linux/err.h>
 #include <linux/io.h>
@@ -18,6 +19,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/reset.h>
 #include <linux/spinlock.h>
+#include <linux/syscore_ops.h>
 
 #define IRQC_IRQ_START			1
 #define IRQC_IRQ_COUNT			8
@@ -55,12 +57,30 @@
 #define TINT_EXTRACT_HWIRQ(x)		FIELD_GET(GENMASK(15, 0), (x))
 #define TINT_EXTRACT_GPIOINT(x)		FIELD_GET(GENMASK(31, 16), (x))
 
-struct rzg2l_irqc_priv {
-	void __iomem *base;
-	struct irq_fwspec fwspec[IRQC_NUM_IRQ];
-	raw_spinlock_t lock;
-};
+/**
+ * struct rzg2l_irqc_reg_cache - registers cache (necessary for suspend/resume)
+ * @iitsr: IITSR register
+ * @titsr: TITSR registers
+ */
+struct rzg2l_irqc_reg_cache {
+	u32	iitsr;
+	u32	titsr[2];
+};
+
+/**
+ * struct rzg2l_irqc_priv - IRQ controller private data structure
+ * @base:	Controller's base address
+ * @fwspec:	IRQ firmware specific data
+ * @lock:	Lock to serialize access to hardware registers
+ * @cache:	Registers cache for suspend/resume
+ */
+static struct rzg2l_irqc_priv {
+	void __iomem	*base;
+	struct irq_fwspec fwspec[IRQC_NUM_IRQ];
+	raw_spinlock_t	lock;
+	struct rzg2l_irqc_reg_cache	cache;
+} *rzg2l_irqc_data;
 
 static struct rzg2l_irqc_priv *irq_data_to_priv(struct irq_data *data)
 {
 	return data->domain->host_data;
@@ -276,6 +296,38 @@ static int rzg2l_irqc_set_type(struct irq_data *d, unsigned int type)
 	return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH);
 }
 
+static int rzg2l_irqc_irq_suspend(void)
+{
+	struct rzg2l_irqc_reg_cache *cache = &rzg2l_irqc_data->cache;
+	void __iomem *base = rzg2l_irqc_data->base;
+
+	cache->iitsr = readl_relaxed(base + IITSR);
+	for (u8 i = 0; i < 2; i++)
+		cache->titsr[i] = readl_relaxed(base + TITSR(i));
+
+	return 0;
+}
+
+static void rzg2l_irqc_irq_resume(void)
+{
+	struct rzg2l_irqc_reg_cache *cache = &rzg2l_irqc_data->cache;
+	void __iomem *base = rzg2l_irqc_data->base;
+
+	/*
+	 * Restore only interrupt type. TSSRx will be restored at the
+	 * request of pin controller to avoid spurious interrupts due
+	 * to invalid PIN states.
+	 */
+	for (u8 i = 0; i < 2; i++)
+		writel_relaxed(cache->titsr[i], base + TITSR(i));
+	writel_relaxed(cache->iitsr, base + IITSR);
+}
+
+static struct syscore_ops rzg2l_irqc_syscore_ops = {
+	.suspend	= rzg2l_irqc_irq_suspend,
+	.resume		= rzg2l_irqc_irq_resume,
+};
+
 static const struct irq_chip irqc_chip = {
 	.name			= "rzg2l-irqc",
 	.irq_eoi		= rzg2l_irqc_eoi,
@@ -357,13 +409,12 @@ static int rzg2l_irqc_parse_interrupts(struct rzg2l_irqc_priv *priv,
 
 static int rzg2l_irqc_init(struct device_node *node, struct device_node *parent)
 {
+	struct platform_device *pdev = of_find_device_by_node(node);
+	struct device *dev __free(put_device) = pdev ? &pdev->dev : NULL;
 	struct irq_domain *irq_domain, *parent_domain;
-	struct platform_device *pdev;
 	struct reset_control *resetn;
-	struct rzg2l_irqc_priv *priv;
 	int ret;
 
-	pdev = of_find_device_by_node(node);
 	if (!pdev)
 		return -ENODEV;
 
@@ -373,15 +424,15 @@ static int rzg2l_irqc_init(struct device_node *node, struct device_node *parent)
 		return -ENODEV;
 	}
 
-	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
-	if (!priv)
+	rzg2l_irqc_data = devm_kzalloc(&pdev->dev, sizeof(*rzg2l_irqc_data), GFP_KERNEL);
+	if (!rzg2l_irqc_data)
 		return -ENOMEM;
 
-	priv->base = devm_of_iomap(&pdev->dev, pdev->dev.of_node, 0, NULL);
-	if (IS_ERR(priv->base))
-		return PTR_ERR(priv->base);
+	rzg2l_irqc_data->base = devm_of_iomap(&pdev->dev, pdev->dev.of_node, 0, NULL);
+	if (IS_ERR(rzg2l_irqc_data->base))
+		return PTR_ERR(rzg2l_irqc_data->base);
 
-	ret = rzg2l_irqc_parse_interrupts(priv, node);
+	ret = rzg2l_irqc_parse_interrupts(rzg2l_irqc_data, node);
 	if (ret) {
 		dev_err(&pdev->dev, "cannot parse interrupts: %d\n", ret);
 		return ret;
@@ -404,17 +455,30 @@ static int rzg2l_irqc_init(struct device_node *node, struct device_node *parent)
 		goto pm_disable;
 	}
 
-	raw_spin_lock_init(&priv->lock);
+	raw_spin_lock_init(&rzg2l_irqc_data->lock);
 
 	irq_domain = irq_domain_add_hierarchy(parent_domain, 0, IRQC_NUM_IRQ,
 					      node, &rzg2l_irqc_domain_ops,
-					      priv);
+					      rzg2l_irqc_data);
 	if (!irq_domain) {
 		dev_err(&pdev->dev, "failed to add irq domain\n");
 		ret = -ENOMEM;
 		goto pm_put;
 	}
 
+	register_syscore_ops(&rzg2l_irqc_syscore_ops);
+
+	/*
+	 * Prevent the cleanup function from invoking put_device by assigning
+	 * NULL to dev.
+	 *
+	 * make coccicheck will complain about missing put_device calls, but
+	 * those are false positives, as dev will be automatically "put" via
+	 * __free_put_device on the failing path.
+	 * On the successful path we don't actually want to "put" dev.
+	 */
+	dev = NULL;
+
 	return 0;
 
 pm_put:

@@ -6208,7 +6208,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
 		.invalid_port_mask = BIT(1) | BIT(2) | BIT(8),
 		.num_internal_phys = 5,
 		.internal_phys_offset = 3,
-		.max_vid = 4095,
+		.max_vid = 8191,
 		.max_sid = 63,
 		.port_base_addr = 0x0,
 		.phy_base_addr = 0x0,

@@ -206,6 +206,7 @@ struct mv88e6xxx_gpio_ops;
 struct mv88e6xxx_avb_ops;
 struct mv88e6xxx_ptp_ops;
 struct mv88e6xxx_pcs_ops;
+struct mv88e6xxx_cc_coeffs;
 
 struct mv88e6xxx_irq {
 	u16 masked;
@@ -397,6 +398,7 @@ struct mv88e6xxx_chip {
 	struct cyclecounter	tstamp_cc;
 	struct timecounter	tstamp_tc;
 	struct delayed_work	overflow_work;
+	const struct mv88e6xxx_cc_coeffs *cc_coeffs;
 
 	struct ptp_clock	*ptp_clock;
 	struct ptp_clock_info	ptp_clock_info;
@@ -719,10 +721,6 @@ struct mv88e6xxx_ptp_ops {
 	int arr1_sts_reg;
 	int dep_sts_reg;
 	u32 rx_filters;
-	u32 cc_shift;
-	u32 cc_mult;
-	u32 cc_mult_num;
-	u32 cc_mult_dem;
 };
 
 struct mv88e6xxx_pcs_ops {

@@ -1713,6 +1713,7 @@ int mv88e6393x_port_set_policy(struct mv88e6xxx_chip *chip, int port,
 	ptr = shift / 8;
 	shift %= 8;
 	mask >>= ptr * 8;
+	ptr <<= 8;
 
 	err = mv88e6393x_port_policy_read(chip, port, ptr, &reg);
 	if (err)

@@ -18,6 +18,13 @@
 
 #define MV88E6XXX_MAX_ADJ_PPB	1000000
 
+struct mv88e6xxx_cc_coeffs {
+	u32 cc_shift;
+	u32 cc_mult;
+	u32 cc_mult_num;
+	u32 cc_mult_dem;
+};
+
 /* Family MV88E6250:
  * Raw timestamps are in units of 10-ns clock periods.
  *
@@ -25,22 +32,43 @@
  * simplifies to
  * clkadj = scaled_ppm * 2^7 / 5^5
  */
-#define MV88E6250_CC_SHIFT	28
-#define MV88E6250_CC_MULT	(10 << MV88E6250_CC_SHIFT)
-#define MV88E6250_CC_MULT_NUM	(1 << 7)
-#define MV88E6250_CC_MULT_DEM	3125ULL
+#define MV88E6XXX_CC_10NS_SHIFT 28
+static const struct mv88e6xxx_cc_coeffs mv88e6xxx_cc_10ns_coeffs = {
+	.cc_shift = MV88E6XXX_CC_10NS_SHIFT,
+	.cc_mult = 10 << MV88E6XXX_CC_10NS_SHIFT,
+	.cc_mult_num = 1 << 7,
+	.cc_mult_dem = 3125ULL,
+};
 
-/* Other families:
+/* Other families except MV88E6393X in internal clock mode:
  * Raw timestamps are in units of 8-ns clock periods.
  *
  * clkadj = scaled_ppm * 8*2^28 / (10^6 * 2^16)
  * simplifies to
  * clkadj = scaled_ppm * 2^9 / 5^6
  */
-#define MV88E6XXX_CC_SHIFT	28
-#define MV88E6XXX_CC_MULT	(8 << MV88E6XXX_CC_SHIFT)
-#define MV88E6XXX_CC_MULT_NUM	(1 << 9)
-#define MV88E6XXX_CC_MULT_DEM	15625ULL
+#define MV88E6XXX_CC_8NS_SHIFT 28
+static const struct mv88e6xxx_cc_coeffs mv88e6xxx_cc_8ns_coeffs = {
+	.cc_shift = MV88E6XXX_CC_8NS_SHIFT,
+	.cc_mult = 8 << MV88E6XXX_CC_8NS_SHIFT,
+	.cc_mult_num = 1 << 9,
+	.cc_mult_dem = 15625ULL
+};
+
+/* Family MV88E6393X using internal clock:
+ * Raw timestamps are in units of 4-ns clock periods.
+ *
+ * clkadj = scaled_ppm * 4*2^28 / (10^6 * 2^16)
+ * simplifies to
+ * clkadj = scaled_ppm * 2^8 / 5^6
+ */
+#define MV88E6XXX_CC_4NS_SHIFT 28
+static const struct mv88e6xxx_cc_coeffs mv88e6xxx_cc_4ns_coeffs = {
+	.cc_shift = MV88E6XXX_CC_4NS_SHIFT,
+	.cc_mult = 4 << MV88E6XXX_CC_4NS_SHIFT,
+	.cc_mult_num = 1 << 8,
+	.cc_mult_dem = 15625ULL
+};
 
 #define TAI_EVENT_WORK_INTERVAL msecs_to_jiffies(100)
 
@@ -83,6 +111,33 @@ static int mv88e6352_set_gpio_func(struct mv88e6xxx_chip *chip, int pin,
 	return chip->info->ops->gpio_ops->set_pctl(chip, pin, func);
 }
 
+static const struct mv88e6xxx_cc_coeffs *
+mv88e6xxx_cc_coeff_get(struct mv88e6xxx_chip *chip)
+{
+	u16 period_ps;
+	int err;
+
+	err = mv88e6xxx_tai_read(chip, MV88E6XXX_TAI_CLOCK_PERIOD, &period_ps, 1);
+	if (err) {
+		dev_err(chip->dev, "failed to read cycle counter period: %d\n",
+			err);
+		return ERR_PTR(err);
+	}
+
+	switch (period_ps) {
+	case 4000:
+		return &mv88e6xxx_cc_4ns_coeffs;
+	case 8000:
+		return &mv88e6xxx_cc_8ns_coeffs;
+	case 10000:
+		return &mv88e6xxx_cc_10ns_coeffs;
+	default:
+		dev_err(chip->dev, "unexpected cycle counter period of %u ps\n",
+			period_ps);
+		return ERR_PTR(-ENODEV);
+	}
+}
+
 static u64 mv88e6352_ptp_clock_read(const struct cyclecounter *cc)
 {
 	struct mv88e6xxx_chip *chip = cc_to_chip(cc);
@@ -200,7 +255,6 @@ out:
 static int mv88e6xxx_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
 {
 	struct mv88e6xxx_chip *chip = ptp_to_chip(ptp);
-	const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops;
 	int neg_adj = 0;
 	u32 diff, mult;
 	u64 adj;
@@ -210,10 +264,10 @@ static int mv88e6xxx_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
 		scaled_ppm = -scaled_ppm;
 	}
 
-	mult = ptp_ops->cc_mult;
-	adj = ptp_ops->cc_mult_num;
+	mult = chip->cc_coeffs->cc_mult;
+	adj = chip->cc_coeffs->cc_mult_num;
 	adj *= scaled_ppm;
-	diff = div_u64(adj, ptp_ops->cc_mult_dem);
+	diff = div_u64(adj, chip->cc_coeffs->cc_mult_dem);
 
 	mv88e6xxx_reg_lock(chip);
 
@@ -360,10 +414,6 @@ const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops = {
 		(1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
 		(1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
 		(1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ),
-	.cc_shift = MV88E6XXX_CC_SHIFT,
-	.cc_mult = MV88E6XXX_CC_MULT,
-	.cc_mult_num = MV88E6XXX_CC_MULT_NUM,
-	.cc_mult_dem = MV88E6XXX_CC_MULT_DEM,
 };
 
 const struct mv88e6xxx_ptp_ops mv88e6250_ptp_ops = {
@@ -387,10 +437,6 @@ const struct mv88e6xxx_ptp_ops mv88e6250_ptp_ops = {
 		(1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
 		(1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
 		(1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ),
-	.cc_shift = MV88E6250_CC_SHIFT,
-	.cc_mult = MV88E6250_CC_MULT,
-	.cc_mult_num = MV88E6250_CC_MULT_NUM,
-	.cc_mult_dem = MV88E6250_CC_MULT_DEM,
 };
 
 const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = {
@@ -414,10 +460,6 @@ const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = {
 		(1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
 		(1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
 		(1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ),
-	.cc_shift = MV88E6XXX_CC_SHIFT,
-	.cc_mult = MV88E6XXX_CC_MULT,
-	.cc_mult_num = MV88E6XXX_CC_MULT_NUM,
-	.cc_mult_dem = MV88E6XXX_CC_MULT_DEM,
};
 
 const struct mv88e6xxx_ptp_ops mv88e6390_ptp_ops = {
@@ -442,10 +484,6 @@ const struct mv88e6xxx_ptp_ops mv88e6390_ptp_ops = {
 		(1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
 		(1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
 		(1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ),
-	.cc_shift = MV88E6XXX_CC_SHIFT,
-	.cc_mult = MV88E6XXX_CC_MULT,
-	.cc_mult_num = MV88E6XXX_CC_MULT_NUM,
-	.cc_mult_dem = MV88E6XXX_CC_MULT_DEM,
 };
 
 static u64 mv88e6xxx_ptp_clock_read(const struct cyclecounter *cc)
@@ -458,10 +496,10 @@ static u64 mv88e6xxx_ptp_clock_read(const struct cyclecounter *cc)
 	return 0;
 }
 
-/* With a 125MHz input clock, the 32-bit timestamp counter overflows in ~34.3
+/* With a 250MHz input clock, the 32-bit timestamp counter overflows in ~17.2
  * seconds; this task forces periodic reads so that we don't miss any.
 */
-#define MV88E6XXX_TAI_OVERFLOW_PERIOD	(HZ * 16)
+#define MV88E6XXX_TAI_OVERFLOW_PERIOD	(HZ * 8)
 static void mv88e6xxx_ptp_overflow_check(struct work_struct *work)
 {
 	struct delayed_work *dw = to_delayed_work(work);
@@ -480,11 +518,15 @@ int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip)
 	int i;
 
 	/* Set up the cycle counter */
+	chip->cc_coeffs = mv88e6xxx_cc_coeff_get(chip);
+	if (IS_ERR(chip->cc_coeffs))
+		return PTR_ERR(chip->cc_coeffs);
+
 	memset(&chip->tstamp_cc, 0, sizeof(chip->tstamp_cc));
 	chip->tstamp_cc.read	= mv88e6xxx_ptp_clock_read;
 	chip->tstamp_cc.mask	= CYCLECOUNTER_MASK(32);
-	chip->tstamp_cc.mult	= ptp_ops->cc_mult;
-	chip->tstamp_cc.shift	= ptp_ops->cc_shift;
+	chip->tstamp_cc.mult	= chip->cc_coeffs->cc_mult;
+	chip->tstamp_cc.shift	= chip->cc_coeffs->cc_shift;
 
 	timecounter_init(&chip->tstamp_tc, &chip->tstamp_cc,
 			 ktime_to_ns(ktime_get_real()));

@@ -484,7 +484,7 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
 
 	if (unlikely(skb->len > MAX_FRAME_SIZE)) {
 		dev->stats.tx_errors++;
-		goto out;
+		goto len_error;
 	}
 
 	/* Save skb pointer. */
@@ -575,6 +575,7 @@ frag_map_error:
map_error:
 	if (net_ratelimit())
 		dev_warn(greth->dev, "Could not create TX DMA mapping\n");
+len_error:
 	dev_kfree_skb(skb);
out:
 	return err;

@@ -322,6 +322,7 @@ static netdev_tx_t bcmasp_xmit(struct sk_buff *skb, struct net_device *dev)
 			}
 			/* Rewind so we do not have a hole */
 			spb_index = intf->tx_spb_index;
+			dev_kfree_skb(skb);
 			return NETDEV_TX_OK;
 		}
 

@@ -1359,6 +1359,7 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
 		netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
 			  skb->data, skb_len);
 		ret = NETDEV_TX_OK;
+		dev_kfree_skb_any(skb);
 		goto out;
 	}
 

@@ -1381,10 +1381,8 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
 	be_get_wrb_params_from_skb(adapter, skb, &wrb_params);
 
 	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
-	if (unlikely(!wrb_cnt)) {
-		dev_kfree_skb_any(skb);
-		goto drop;
-	}
+	if (unlikely(!wrb_cnt))
+		goto drop_skb;
 
 	/* if os2bmc is enabled and if the pkt is destined to bmc,
 	 * enqueue the pkt a 2nd time with mgmt bit set.
@@ -1393,7 +1391,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
 		BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
 		wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
 		if (unlikely(!wrb_cnt))
-			goto drop;
+			goto drop_skb;
 		else
 			skb_get(skb);
 	}
@@ -1407,6 +1405,8 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
 		be_xmit_flush(adapter, txo);
 
 	return NETDEV_TX_OK;
+drop_skb:
+	dev_kfree_skb_any(skb);
drop:
 	tx_stats(txo)->tx_drv_drops++;
 	/* Flush the already enqueued tx requests */

@@ -197,55 +197,67 @@ static int mac_probe(struct platform_device *_of_dev)
 		err = -EINVAL;
 		goto _return_of_node_put;
 	}
+	mac_dev->fman_dev = &of_dev->dev;
 
 	/* Get the FMan cell-index */
 	err = of_property_read_u32(dev_node, "cell-index", &val);
 	if (err) {
 		dev_err(dev, "failed to read cell-index for %pOF\n", dev_node);
 		err = -EINVAL;
-		goto _return_of_node_put;
+		goto _return_dev_put;
 	}
 	/* cell-index 0 => FMan id 1 */
 	fman_id = (u8)(val + 1);
 
-	priv->fman = fman_bind(&of_dev->dev);
+	priv->fman = fman_bind(mac_dev->fman_dev);
 	if (!priv->fman) {
 		dev_err(dev, "fman_bind(%pOF) failed\n", dev_node);
 		err = -ENODEV;
-		goto _return_of_node_put;
+		goto _return_dev_put;
 	}
 
+	/* Two references have been taken in of_find_device_by_node()
+	 * and fman_bind(). Release one of them here. The second one
+	 * will be released in mac_remove().
+	 */
+	put_device(mac_dev->fman_dev);
 	of_node_put(dev_node);
 	dev_node = NULL;
 
 	/* Get the address of the memory mapped registers */
 	mac_dev->res = platform_get_mem_or_io(_of_dev, 0);
 	if (!mac_dev->res) {
 		dev_err(dev, "could not get registers\n");
-		return -EINVAL;
+		err = -EINVAL;
+		goto _return_dev_put;
 	}
 
 	err = devm_request_resource(dev, fman_get_mem_region(priv->fman),
 				    mac_dev->res);
 	if (err) {
 		dev_err_probe(dev, err, "could not request resource\n");
-		return err;
+		goto _return_dev_put;
 	}
 
 	mac_dev->vaddr = devm_ioremap(dev, mac_dev->res->start,
 				      resource_size(mac_dev->res));
 	if (!mac_dev->vaddr) {
 		dev_err(dev, "devm_ioremap() failed\n");
-		return -EIO;
+		err = -EIO;
+		goto _return_dev_put;
 	}
 
-	if (!of_device_is_available(mac_node))
-		return -ENODEV;
+	if (!of_device_is_available(mac_node)) {
+		err = -ENODEV;
+		goto _return_dev_put;
+	}
 
 	/* Get the cell-index */
 	err = of_property_read_u32(mac_node, "cell-index", &val);
 	if (err) {
 		dev_err(dev, "failed to read cell-index for %pOF\n", mac_node);
-		return -EINVAL;
+		err = -EINVAL;
+		goto _return_dev_put;
 	}
 	priv->cell_index = (u8)val;
 
@@ -259,22 +271,26 @@ static int mac_probe(struct platform_device *_of_dev)
 	if (unlikely(nph < 0)) {
 		dev_err(dev, "of_count_phandle_with_args(%pOF, fsl,fman-ports) failed\n",
 			mac_node);
-		return nph;
+		err = nph;
+		goto _return_dev_put;
 	}
 
 	if (nph != ARRAY_SIZE(mac_dev->port)) {
 		dev_err(dev, "Not supported number of fman-ports handles of mac node %pOF from device tree\n",
 			mac_node);
-		return -EINVAL;
+		err = -EINVAL;
+		goto _return_dev_put;
 	}
 
-	for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
+	/* PORT_NUM determines the size of the port array */
+	for (i = 0; i < PORT_NUM; i++) {
 		/* Find the port node */
 		dev_node = of_parse_phandle(mac_node, "fsl,fman-ports", i);
 		if (!dev_node) {
 			dev_err(dev, "of_parse_phandle(%pOF, fsl,fman-ports) failed\n",
 				mac_node);
-			return -EINVAL;
+			err = -EINVAL;
+			goto _return_dev_arr_put;
 		}
 
 		of_dev = of_find_device_by_node(dev_node);
@@ -282,17 +298,24 @@ static int mac_probe(struct platform_device *_of_dev)
 			dev_err(dev, "of_find_device_by_node(%pOF) failed\n",
 				dev_node);
 			err = -EINVAL;
-			goto _return_of_node_put;
+			goto _return_dev_arr_put;
 		}
+		mac_dev->fman_port_devs[i] = &of_dev->dev;
 
-		mac_dev->port[i] = fman_port_bind(&of_dev->dev);
+		mac_dev->port[i] = fman_port_bind(mac_dev->fman_port_devs[i]);
 		if (!mac_dev->port[i]) {
 			dev_err(dev, "dev_get_drvdata(%pOF) failed\n",
 				dev_node);
 			err = -EINVAL;
-			goto _return_of_node_put;
+			goto _return_dev_arr_put;
 		}
+		/* Two references have been taken in of_find_device_by_node()
+		 * and fman_port_bind(). Release one of them here. The second
+		 * one will be released in mac_remove().
+		 */
+		put_device(mac_dev->fman_port_devs[i]);
 		of_node_put(dev_node);
 		dev_node = NULL;
 	}
 
 	/* Get the PHY connection type */
@@ -312,7 +335,7 @@ static int mac_probe(struct platform_device *_of_dev)
 
 	err = init(mac_dev, mac_node, &params);
 	if (err < 0)
-		return err;
+		goto _return_dev_arr_put;
 
 	if (!is_zero_ether_addr(mac_dev->addr))
 		dev_info(dev, "FMan MAC address: %pM\n", mac_dev->addr);
@@ -327,6 +350,12 @@ static int mac_probe(struct platform_device *_of_dev)
 
 	return err;
 
+_return_dev_arr_put:
+	/* mac_dev is kzalloc'ed */
+	for (i = 0; i < PORT_NUM; i++)
+		put_device(mac_dev->fman_port_devs[i]);
+_return_dev_put:
+	put_device(mac_dev->fman_dev);
_return_of_node_put:
 	of_node_put(dev_node);
 	return err;
@@ -335,6 +364,11 @@ _return_of_node_put:
 static void mac_remove(struct platform_device *pdev)
 {
 	struct mac_device *mac_dev = platform_get_drvdata(pdev);
+	int		   i;
 
+	for (i = 0; i < PORT_NUM; i++)
+		put_device(mac_dev->fman_port_devs[i]);
+	put_device(mac_dev->fman_dev);
 
 	platform_device_unregister(mac_dev->priv->eth_dev);
 }

@@ -19,12 +19,13 @@
 struct fman_mac;
 struct mac_priv_s;
 
+#define PORT_NUM 2
 struct mac_device {
 	void __iomem		*vaddr;
 	struct device		*dev;
 	struct resource		*res;
 	u8			 addr[ETH_ALEN];
-	struct fman_port	*port[2];
+	struct fman_port	*port[PORT_NUM];
 	struct phylink		*phylink;
 	struct phylink_config	phylink_config;
 	phy_interface_t		phy_if;
@@ -52,6 +53,9 @@ struct mac_device {
 
 	struct fman_mac		*fman_mac;
 	struct mac_priv_s	*priv;
+
+	struct device		*fman_dev;
+	struct device		*fman_port_devs[PORT_NUM];
 };
 
 static inline struct mac_device

@@ -1012,6 +1012,7 @@ sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev)
 	if(skb->len > XMIT_BUFF_SIZE)
 	{
 		printk("%s: Sorry, max. framelength is %d bytes. The length of your frame is %d bytes.\n",dev->name,XMIT_BUFF_SIZE,skb->len);
+		dev_kfree_skb(skb);
 		return NETDEV_TX_OK;
 	}
 

@@ -336,6 +336,51 @@ static int octep_oq_check_hw_for_pkts(struct octep_device *oct,
 	return new_pkts;
 }
 
+/**
+ * octep_oq_next_pkt() - Move to the next packet in Rx queue.
+ *
+ * @oq: Octeon Rx queue data structure.
+ * @buff_info: Current packet buffer info.
+ * @read_idx: Current packet index in the ring.
+ * @desc_used: Current packet descriptor number.
+ *
+ * Free the resources associated with a packet.
+ * Increment packet index in the ring and packet descriptor number.
+ */
+static void octep_oq_next_pkt(struct octep_oq *oq,
+			      struct octep_rx_buffer *buff_info,
+			      u32 *read_idx, u32 *desc_used)
+{
+	dma_unmap_page(oq->dev, oq->desc_ring[*read_idx].buffer_ptr,
+		       PAGE_SIZE, DMA_FROM_DEVICE);
+	buff_info->page = NULL;
+	(*read_idx)++;
+	(*desc_used)++;
+	if (*read_idx == oq->max_count)
+		*read_idx = 0;
+}
+
+/**
+ * octep_oq_drop_rx() - Free the resources associated with a packet.
+ *
+ * @oq: Octeon Rx queue data structure.
+ * @buff_info: Current packet buffer info.
+ * @read_idx: Current packet index in the ring.
+ * @desc_used: Current packet descriptor number.
+ *
+ */
+static void octep_oq_drop_rx(struct octep_oq *oq,
+			     struct octep_rx_buffer *buff_info,
+			     u32 *read_idx, u32 *desc_used)
+{
+	int data_len = buff_info->len - oq->max_single_buffer_size;
+
+	while (data_len > 0) {
+		octep_oq_next_pkt(oq, buff_info, read_idx, desc_used);
+		data_len -= oq->buffer_size;
+	};
+}
+
 /**
  * __octep_oq_process_rx() - Process hardware Rx queue and push to stack.
  *
@@ -365,10 +410,7 @@ static int __octep_oq_process_rx(struct octep_device *oct,
 	desc_used = 0;
 	for (pkt = 0; pkt < pkts_to_process; pkt++) {
 		buff_info = (struct octep_rx_buffer *)&oq->buff_info[read_idx];
-		dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr,
-			       PAGE_SIZE, DMA_FROM_DEVICE);
 		resp_hw = page_address(buff_info->page);
-		buff_info->page = NULL;
 
 		/* Swap the length field that is in Big-Endian to CPU */
 		buff_info->len = be64_to_cpu(resp_hw->length);
@@ -390,36 +432,33 @@ static int __octep_oq_process_rx(struct octep_device *oct,
 			 */
 			data_offset = OCTEP_OQ_RESP_HW_SIZE;
 		}
 
+		octep_oq_next_pkt(oq, buff_info, &read_idx, &desc_used);
+
+		skb = build_skb((void *)resp_hw, PAGE_SIZE);
+		if (!skb) {
+			octep_oq_drop_rx(oq, buff_info,
+					 &read_idx, &desc_used);
+			oq->stats.alloc_failures++;
+			continue;
+		}
+		skb_reserve(skb, data_offset);
+
 		rx_bytes += buff_info->len;
 
 		if (buff_info->len <= oq->max_single_buffer_size) {
-			skb = build_skb((void *)resp_hw, PAGE_SIZE);
-			skb_reserve(skb, data_offset);
 			skb_put(skb, buff_info->len);
-			read_idx++;
-			desc_used++;
-			if (read_idx == oq->max_count)
-				read_idx = 0;
 		} else {
 			struct skb_shared_info *shinfo;
 			u16 data_len;
 
-			skb = build_skb((void *)resp_hw, PAGE_SIZE);
-			skb_reserve(skb, data_offset);
 			/* Head fragment includes response header(s);
 			 * subsequent fragments contains only data.
 			 */
 			skb_put(skb, oq->max_single_buffer_size);
-			read_idx++;
-			desc_used++;
-			if (read_idx == oq->max_count)
-				read_idx = 0;
 
 			shinfo = skb_shinfo(skb);
 			data_len = buff_info->len - oq->max_single_buffer_size;
 			while (data_len) {
-				dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr,
-					       PAGE_SIZE, DMA_FROM_DEVICE);
 				buff_info = (struct octep_rx_buffer *)
 					    &oq->buff_info[read_idx];
 				if (data_len < oq->buffer_size) {
@@ -434,11 +473,8 @@ static int __octep_oq_process_rx(struct octep_device *oct,
 						buff_info->page, 0,
 						buff_info->len,
 						buff_info->len);
-				buff_info->page = NULL;
-				read_idx++;
-				desc_used++;
-				if (read_idx == oq->max_count)
-					read_idx = 0;
+
+				octep_oq_next_pkt(oq, buff_info, &read_idx, &desc_used);
 			}
 		}
 

@@ -2298,7 +2298,7 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
 				NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link));
 		if (!(cfg & BIT_ULL(12)))
 			continue;
-		bmap |= (1 << i);
+		bmap |= BIT_ULL(i);
 		cfg &= ~BIT_ULL(12);
 		rvu_write64(rvu, blkaddr,
 			    NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg);
@@ -2319,7 +2319,7 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
 
 	/* Set NIX_AF_TL3_TL2_LINKX_CFG[ENA] for the TL3/TL2 queue */
 	for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) {
-		if (!(bmap & (1 << i)))
+		if (!(bmap & BIT_ULL(i)))
 			continue;
 		cfg = rvu_read64(rvu, blkaddr,
 				 NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link));

@@ -1758,6 +1758,10 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
 	}
 }
 
+#define MLX5_MAX_MANAGE_PAGES_CMD_ENT 1
+#define MLX5_CMD_MASK ((1UL << (cmd->vars.max_reg_cmds + \
+			       MLX5_MAX_MANAGE_PAGES_CMD_ENT)) - 1)
+
 static void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
 {
 	struct mlx5_cmd *cmd = &dev->cmd;
@@ -1769,7 +1773,7 @@ static void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
 	/* wait for pending handlers to complete */
 	mlx5_eq_synchronize_cmd_irq(dev);
 	spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
-	vector = ~dev->cmd.vars.bitmask & ((1ul << (1 << dev->cmd.vars.log_sz)) - 1);
+	vector = ~dev->cmd.vars.bitmask & MLX5_CMD_MASK;
 	if (!vector)
 		goto no_trig;
 
@@ -2275,7 +2279,7 @@ int mlx5_cmd_enable(struct mlx5_core_dev *dev)
 
 	cmd->state = MLX5_CMDIF_STATE_DOWN;
 	cmd->vars.max_reg_cmds = (1 << cmd->vars.log_sz) - 1;
-	cmd->vars.bitmask = (1UL << cmd->vars.max_reg_cmds) - 1;
+	cmd->vars.bitmask = MLX5_CMD_MASK;
 
 	sema_init(&cmd->vars.sem, cmd->vars.max_reg_cmds);
 	sema_init(&cmd->vars.pages_sem, 1);

@@ -1073,6 +1073,12 @@ int mlx5_comp_eqn_get(struct mlx5_core_dev *dev, u16 vecidx, int *eqn)
 	struct mlx5_eq_comp *eq;
 	int ret = 0;
 
+	if (vecidx >= table->max_comp_eqs) {
+		mlx5_core_dbg(dev, "Requested vector index %u should be less than %u",
+			      vecidx, table->max_comp_eqs);
+		return -EINVAL;
+	}
+
 	mutex_lock(&table->comp_lock);
 	eq = xa_load(&table->comp_eqs, vecidx);
 	if (eq) {

@@ -1489,7 +1489,7 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs)
 	}
 
 	if (err)
-		goto abort;
+		goto err_esw_enable;
 
 	esw->fdb_table.flags |= MLX5_ESW_FDB_CREATED;
 
@@ -1503,7 +1503,8 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs)
 
 	return 0;
 
-abort:
+err_esw_enable:
+	mlx5_eq_notifier_unregister(esw->dev, &esw->nb);
 	mlx5_esw_acls_ns_cleanup(esw);
 	return err;
 }

@@ -4576,7 +4576,9 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
 	if ((status & 0xffff) == 0xffff || !(status & tp->irq_mask))
 		return IRQ_NONE;
 
-	if (unlikely(status & SYSErr)) {
+	/* At least RTL8168fp may unexpectedly set the SYSErr bit */
+	if (unlikely(status & SYSErr &&
+	    tp->mac_version <= RTL_GIGA_MAC_VER_06)) {
 		rtl8169_pcierr_interrupt(tp->dev);
 		goto out;
 	}

@@ -1673,20 +1673,19 @@ static int ravb_get_ts_info(struct net_device *ndev,
 	struct ravb_private *priv = netdev_priv(ndev);
 	const struct ravb_hw_info *hw_info = priv->info;
 
-	info->so_timestamping =
-		SOF_TIMESTAMPING_TX_SOFTWARE |
-		SOF_TIMESTAMPING_RX_SOFTWARE |
-		SOF_TIMESTAMPING_SOFTWARE |
-		SOF_TIMESTAMPING_TX_HARDWARE |
-		SOF_TIMESTAMPING_RX_HARDWARE |
-		SOF_TIMESTAMPING_RAW_HARDWARE;
-	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
-	info->rx_filters =
-		(1 << HWTSTAMP_FILTER_NONE) |
-		(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
-		(1 << HWTSTAMP_FILTER_ALL);
-	if (hw_info->gptp || hw_info->ccc_gac)
+	if (hw_info->gptp || hw_info->ccc_gac) {
+		info->so_timestamping =
+			SOF_TIMESTAMPING_TX_SOFTWARE |
+			SOF_TIMESTAMPING_TX_HARDWARE |
+			SOF_TIMESTAMPING_RX_HARDWARE |
+			SOF_TIMESTAMPING_RAW_HARDWARE;
+		info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+		info->rx_filters =
+			(1 << HWTSTAMP_FILTER_NONE) |
+			(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+			(1 << HWTSTAMP_FILTER_ALL);
 		info->phc_index = ptp_clock_index(priv->ptp.clock);
+	}
 
 	return 0;
 }

@@ -127,10 +127,12 @@ static int mgbe_uphy_lane_bringup_serdes_up(struct net_device *ndev, void *mgbe_
 	value &= ~XPCS_WRAP_UPHY_RX_CONTROL_AUX_RX_IDDQ;
 	writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
 
+	usleep_range(10, 20);  /* 50ns min delay needed as per HW design */
 	value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
 	value &= ~XPCS_WRAP_UPHY_RX_CONTROL_RX_SLEEP;
 	writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
 
+	usleep_range(10, 20);  /* 500ns min delay needed as per HW design */
 	value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
 	value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_CAL_EN;
 	writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
@@ -143,22 +145,30 @@ static int mgbe_uphy_lane_bringup_serdes_up(struct net_device *ndev, void *mgbe_
 		return err;
 	}
 
+	usleep_range(10, 20);  /* 50ns min delay needed as per HW design */
 	value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
 	value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_DATA_EN;
 	writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
 
+	value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+	value &= ~XPCS_WRAP_UPHY_RX_CONTROL_RX_PCS_PHY_RDY;
+	writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+
+	usleep_range(10, 20);  /* 50ns min delay needed as per HW design */
 	value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
 	value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_CDR_RESET;
 	writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
 
-	value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
-	value &= ~XPCS_WRAP_UPHY_RX_CONTROL_RX_CDR_RESET;
-	writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
-
+	usleep_range(10, 20);  /* 50ns min delay needed as per HW design */
 	value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
 	value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_PCS_PHY_RDY;
 	writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
 
+	msleep(30);	/* 30ms delay needed as per HW design */
+	value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+	value &= ~XPCS_WRAP_UPHY_RX_CONTROL_RX_CDR_RESET;
+	writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+
 	err = readl_poll_timeout(mgbe->xpcs + XPCS_WRAP_IRQ_STATUS, value,
 				 value & XPCS_WRAP_IRQ_STATUS_PCS_LINK_STS,
 				 500, 500 * 2000);

@@ -845,6 +845,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		if (net_ratelimit())
 			netdev_err(ndev, "TX DMA mapping error\n");
 		ndev->stats.tx_dropped++;
+		dev_kfree_skb_any(skb);
 		return NETDEV_TX_OK;
 	}
 	desc_set_phys_addr(lp, phys, cur_p);
@@ -865,6 +866,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 			ndev->stats.tx_dropped++;
 			axienet_free_tx_chain(lp, orig_tail_ptr, ii + 1,
 					      true, NULL, 0);
+			dev_kfree_skb_any(skb);
 			return NETDEV_TX_OK;
 		}
 		desc_set_phys_addr(lp, phys, cur_p);

@@ -2795,6 +2795,31 @@ static struct hv_driver netvsc_drv = {
 	},
 };
 
+/* Set VF's namespace same as the synthetic NIC */
+static void netvsc_event_set_vf_ns(struct net_device *ndev)
+{
+	struct net_device_context *ndev_ctx = netdev_priv(ndev);
+	struct net_device *vf_netdev;
+	int ret;
+
+	vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
+	if (!vf_netdev)
+		return;
+
+	if (!net_eq(dev_net(ndev), dev_net(vf_netdev))) {
+		ret = dev_change_net_namespace(vf_netdev, dev_net(ndev),
+					       "eth%d");
+		if (ret)
+			netdev_err(vf_netdev,
+				   "Cannot move to same namespace as %s: %d\n",
+				   ndev->name, ret);
+		else
+			netdev_info(vf_netdev,
+				    "Moved VF to namespace with: %s\n",
+				    ndev->name);
+	}
+}
+
 /*
  * On Hyper-V, every VF interface is matched with a corresponding
  * synthetic interface. The synthetic interface is presented first
@@ -2807,6 +2832,11 @@ static int netvsc_netdev_event(struct notifier_block *this,
 	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
 	int ret = 0;
 
+	if (event_dev->netdev_ops == &device_ops && event == NETDEV_REGISTER) {
+		netvsc_event_set_vf_ns(event_dev);
+		return NOTIFY_DONE;
+	}
+
 	ret = check_dev_is_matching_vf(event_dev);
 	if (ret != 0)
 		return NOTIFY_DONE;

@ -151,19 +151,6 @@ static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
|
|||
return sa;
|
||||
}
|
||||
|
||||
static struct macsec_rx_sa *macsec_active_rxsa_get(struct macsec_rx_sc *rx_sc)
|
||||
{
|
||||
struct macsec_rx_sa *sa = NULL;
|
||||
int an;
|
||||
|
||||
for (an = 0; an < MACSEC_NUM_AN; an++) {
|
||||
sa = macsec_rxsa_get(rx_sc->sa[an]);
|
||||
if (sa)
|
||||
break;
|
||||
}
|
||||
return sa;
|
||||
}
|
||||
|
||||
static void free_rx_sc_rcu(struct rcu_head *head)
|
||||
{
|
||||
struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);
|
||||
|
@@ -1205,15 +1192,12 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
 		/* If validateFrames is Strict or the C bit in the
 		 * SecTAG is set, discard
 		 */
-		struct macsec_rx_sa *active_rx_sa = macsec_active_rxsa_get(rx_sc);
 		if (hdr->tci_an & MACSEC_TCI_C ||
 		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
 			u64_stats_update_begin(&rxsc_stats->syncp);
 			rxsc_stats->stats.InPktsNotUsingSA++;
 			u64_stats_update_end(&rxsc_stats->syncp);
 			DEV_STATS_INC(secy->netdev, rx_errors);
-			if (active_rx_sa)
-				this_cpu_inc(active_rx_sa->stats->InPktsNotUsingSA);
 			goto drop_nosa;
 		}
 
@@ -1223,8 +1207,6 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
 			u64_stats_update_begin(&rxsc_stats->syncp);
 			rxsc_stats->stats.InPktsUnusedSA++;
 			u64_stats_update_end(&rxsc_stats->syncp);
-			if (active_rx_sa)
-				this_cpu_inc(active_rx_sa->stats->InPktsUnusedSA);
 			goto deliver;
 		}
 
@@ -836,7 +836,8 @@ static void nsim_dev_trap_report_work(struct work_struct *work)
 	nsim_dev = nsim_trap_data->nsim_dev;
 
 	if (!devl_trylock(priv_to_devlink(nsim_dev))) {
-		schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw, 1);
+		queue_delayed_work(system_unbound_wq,
+				   &nsim_dev->trap_data->trap_report_dw, 1);
 		return;
 	}
 
@@ -848,11 +849,12 @@ static void nsim_dev_trap_report_work(struct work_struct *work)
 			continue;
 
 		nsim_dev_trap_report(nsim_dev_port);
+		cond_resched();
 	}
 	devl_unlock(priv_to_devlink(nsim_dev));
-
-	schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw,
-			      msecs_to_jiffies(NSIM_TRAP_REPORT_INTERVAL_MS));
+	queue_delayed_work(system_unbound_wq,
+			   &nsim_dev->trap_data->trap_report_dw,
+			   msecs_to_jiffies(NSIM_TRAP_REPORT_INTERVAL_MS));
 }
 
 static int nsim_dev_traps_init(struct devlink *devlink)
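
All of the requeue sites now target system_unbound_wq instead of the default per-CPU system_wq: unbound workers may run for a long time and call cond_resched() between ports without pinning one CPU. A minimal self-rearming sketch of the pattern, with hypothetical names:

static struct delayed_work example_dw;

static void example_work_fn(struct work_struct *work)
{
	/* ... report one batch, calling cond_resched() between items ... */

	/* re-arm on the unbound workqueue, as the hunks above do */
	queue_delayed_work(system_unbound_wq, &example_dw,
			   msecs_to_jiffies(100));
}

/* setup: INIT_DELAYED_WORK(&example_dw, example_work_fn);
 *        queue_delayed_work(system_unbound_wq, &example_dw, 0);
 */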
@@ -907,8 +909,9 @@ static int nsim_dev_traps_init(struct devlink *devlink)
 
 	INIT_DELAYED_WORK(&nsim_dev->trap_data->trap_report_dw,
 			  nsim_dev_trap_report_work);
-	schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw,
-			      msecs_to_jiffies(NSIM_TRAP_REPORT_INTERVAL_MS));
+	queue_delayed_work(system_unbound_wq,
+			   &nsim_dev->trap_data->trap_report_dw,
+			   msecs_to_jiffies(NSIM_TRAP_REPORT_INTERVAL_MS));
 
 	return 0;
 
@@ -40,8 +40,8 @@
 /* Control Register 2 bits */
 #define DP83822_FX_ENABLE	BIT(14)
 
-#define DP83822_HW_RESET	BIT(15)
-#define DP83822_SW_RESET	BIT(14)
+#define DP83822_SW_RESET	BIT(15)
+#define DP83822_DIG_RESTART	BIT(14)
 
 /* PHY STS bits */
 #define DP83822_PHYSTS_DUPLEX	BIT(2)
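
The old macros had the two reset bits shifted by one: BIT(15) is the full software reset (registers return to defaults) while BIT(14) only restarts the digital core and preserves register contents. A hedged usage sketch; the register name MII_DP83822_RESET_CTRL is assumed from the driver, and the helper is hypothetical:

/* assumes MII_DP83822_RESET_CTRL is the driver's reset control register */
static int dp83822_example_reset(struct phy_device *phydev, bool full)
{
	u16 bit = full ? DP83822_SW_RESET : DP83822_DIG_RESTART;

	return phy_set_bits(phydev, MII_DP83822_RESET_CTRL, bit);
}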
@@ -815,7 +815,7 @@ plip_send_packet(struct net_device *dev, struct net_local *nl,
 				return HS_TIMEOUT;
 			}
 		}
-		break;
+		fallthrough;
 
 	case PLIP_PK_LENGTH_LSB:
 		if (plip_send(nibble_timeout, dev,
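
The bug: this case originally fell through into PLIP_PK_LENGTH_LSB, and a cleanup replaced the implicit fall-through with break, so the transmit state machine never advanced to sending the length bytes. The fallthrough pseudo-keyword (from linux/compiler_attributes.h) restores the old control flow while keeping -Wimplicit-fallthrough happy. A compact sketch of the construct, with a hypothetical state machine:

enum example_state { EX_TRIGGER, EX_LENGTH_LSB };

static void example_step(enum example_state state)
{
	switch (state) {
	case EX_TRIGGER:
		/* handshake done; continue straight into the next state */
		fallthrough;
	case EX_LENGTH_LSB:
		/* send the low length byte */
		break;
	}
}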
@@ -1771,7 +1771,8 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
 	// can rename the link if it knows better.
 	if ((dev->driver_info->flags & FLAG_ETHER) != 0 &&
 	    ((dev->driver_info->flags & FLAG_POINTTOPOINT) == 0 ||
-	     (net->dev_addr [0] & 0x02) == 0))
+	     /* somebody touched it*/
+	     !is_zero_ether_addr(net->dev_addr)))
 		strscpy(net->name, "eth%d", sizeof(net->name));
 	/* WLAN devices should always be named "wlan%d" */
 	if ((dev->driver_info->flags & FLAG_WLAN) != 0)
@@ -1874,6 +1875,7 @@ out1:
 	 * may trigger an error resubmitting itself and, worse,
 	 * schedule a timer. So we kill it all just in case.
 	 */
+	usbnet_mark_going_away(dev);
 	cancel_work_sync(&dev->kevent);
 	del_timer_sync(&dev->delay);
 	free_percpu(net->tstats);
@@ -148,7 +148,7 @@ vmxnet3_xdp_xmit_frame(struct vmxnet3_adapter *adapter,
 	} else { /* XDP buffer from page pool */
 		page = virt_to_page(xdpf->data);
 		tbi->dma_addr = page_pool_get_dma_addr(page) +
-				VMXNET3_XDP_HEADROOM;
+				(xdpf->data - (void *)xdpf);
 		dma_sync_single_for_device(&adapter->pdev->dev,
 					   tbi->dma_addr, buf_size,
 					   DMA_TO_DEVICE);
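
The fix replaces a fixed headroom constant with the actual distance between the frame metadata and the packet payload: once BPF programs adjust headroom, the payload no longer sits at a fixed offset from the buffer start. A userspace toy of the arithmetic, with all values made up:

#include <stdio.h>

struct toy_xdp_frame { unsigned char meta[64]; };	/* stand-in for xdp_frame */

int main(void)
{
	unsigned char buffer[2048];			/* page-pool buffer */
	struct toy_xdp_frame *xdpf = (void *)buffer;	/* frame sits at buffer start */
	unsigned char *data = buffer + 320;		/* payload offset varies */
	unsigned long base_dma = 0x10000;		/* page_pool_get_dma_addr() */

	/* old: base_dma + FIXED_HEADROOM, wrong once the payload moved */
	/* new: base_dma + (data - (void *)xdpf) */
	printf("payload dma = %#lx\n",
	       base_dma + (unsigned long)(data - (unsigned char *)xdpf));
	return 0;
}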
@@ -1031,7 +1031,7 @@ static const struct nla_policy wwan_rtnl_policy[IFLA_WWAN_MAX + 1] = {
 
 static struct rtnl_link_ops wwan_rtnl_link_ops __read_mostly = {
 	.kind = "wwan",
-	.maxtype = __IFLA_WWAN_MAX,
+	.maxtype = IFLA_WWAN_MAX,
 	.alloc = wwan_rtnl_alloc,
 	.validate = wwan_rtnl_validate,
 	.newlink = wwan_rtnl_newlink,
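
A netlink policy array sized [MAX + 1] is indexed 0..MAX, so .maxtype must be MAX; passing the enum sentinel __MAX (which equals MAX + 1) makes attribute validation read one entry past the array, which is the out-of-bounds this hunk fixes. A sketch with a hypothetical attribute set:

enum {
	EXAMPLE_ATTR_UNSPEC,
	EXAMPLE_ATTR_NAME,
	__EXAMPLE_ATTR_MAX,
};
#define EXAMPLE_ATTR_MAX (__EXAMPLE_ATTR_MAX - 1)

static const struct nla_policy example_policy[EXAMPLE_ATTR_MAX + 1] = {
	[EXAMPLE_ATTR_NAME] = { .type = NLA_STRING },
};
/* correct: .maxtype = EXAMPLE_ATTR_MAX (1); using __EXAMPLE_ATTR_MAX (2)
 * would make the validator index example_policy[2], one past the end.
 */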
@@ -2469,17 +2469,29 @@ static unsigned int nvme_pci_nr_maps(struct nvme_dev *dev)
 	return 1;
 }
 
-static void nvme_pci_update_nr_queues(struct nvme_dev *dev)
+static bool nvme_pci_update_nr_queues(struct nvme_dev *dev)
 {
 	if (!dev->ctrl.tagset) {
 		nvme_alloc_io_tag_set(&dev->ctrl, &dev->tagset, &nvme_mq_ops,
 				nvme_pci_nr_maps(dev), sizeof(struct nvme_iod));
-		return;
+		return true;
+	}
+
+	/* Give up if we are racing with nvme_dev_disable() */
+	if (!mutex_trylock(&dev->shutdown_lock))
+		return false;
+
+	/* Check if nvme_dev_disable() has been executed already */
+	if (!dev->online_queues) {
+		mutex_unlock(&dev->shutdown_lock);
+		return false;
 	}
 
 	blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);
 	/* free previously allocated queues that are no longer usable */
 	nvme_free_queues(dev, dev->online_queues);
+	mutex_unlock(&dev->shutdown_lock);
+	return true;
 }
 
 static int nvme_pci_enable(struct nvme_dev *dev)
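
The reshaped helper returns false when it loses the race with nvme_dev_disable(), and the trylock is what keeps the reset path from blocking against teardown. The locking pattern in isolation, with hypothetical types:

struct example_ctrl {
	struct mutex shutdown_lock;
	unsigned int online_queues;
};

static bool example_update_queues(struct example_ctrl *ctrl)
{
	/* give up rather than block if teardown holds the lock */
	if (!mutex_trylock(&ctrl->shutdown_lock))
		return false;

	/* teardown may already have run and freed the queues */
	if (!ctrl->online_queues) {
		mutex_unlock(&ctrl->shutdown_lock);
		return false;
	}

	/* ... resize hardware queues safely here ... */
	mutex_unlock(&ctrl->shutdown_lock);
	return true;
}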
@@ -2757,10 +2769,11 @@ static void nvme_reset_work(struct work_struct *work)
 	 * controller around but remove all namespaces.
 	 */
 	if (dev->online_queues > 1) {
-		nvme_dbbuf_set(dev);
 		nvme_unquiesce_io_queues(&dev->ctrl);
 		nvme_wait_freeze(&dev->ctrl);
-		nvme_pci_update_nr_queues(dev);
+		nvme_dbbuf_set(dev);
+		if (!nvme_pci_update_nr_queues(dev))
+			goto out;
 		nvme_unfreeze(&dev->ctrl);
 	} else {
 		dev_warn(dev->ctrl.device, "IO queues lost\n");
@@ -264,6 +264,15 @@ static const struct key_entry dell_wmi_keymap_type_0010[] = {
 	/*Speaker Mute*/
 	{ KE_KEY, 0x109, { KEY_MUTE} },
 
+	/* S2Idle screen off */
+	{ KE_IGNORE, 0x120, { KEY_RESERVED }},
+
+	/* Leaving S4 or S2Idle suspend */
+	{ KE_IGNORE, 0x130, { KEY_RESERVED }},
+
+	/* Entering S2Idle suspend */
+	{ KE_IGNORE, 0x140, { KEY_RESERVED }},
+
 	/* Mic mute */
 	{ KE_KEY, 0x150, { KEY_MICMUTE } },
 
@@ -521,6 +521,7 @@ static int __init sysman_init(void)
 	int ret = 0;
 
 	if (!dmi_find_device(DMI_DEV_TYPE_OEM_STRING, "Dell System", NULL) &&
+	    !dmi_find_device(DMI_DEV_TYPE_OEM_STRING, "Alienware", NULL) &&
 	    !dmi_find_device(DMI_DEV_TYPE_OEM_STRING, "www.dell.com", NULL)) {
 		pr_err("Unable to run on non-Dell system\n");
 		return -ENODEV;
@@ -166,7 +166,7 @@ static int __dtpm_devfreq_setup(struct devfreq *devfreq, struct dtpm *parent)
 	ret = dev_pm_qos_add_request(dev, &dtpm_devfreq->qos_req,
 				     DEV_PM_QOS_MAX_FREQUENCY,
 				     PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
-	if (ret) {
+	if (ret < 0) {
 		pr_err("Failed to add QoS request: %d\n", ret);
 		goto out_dtpm_unregister;
 	}
@@ -692,7 +692,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
 
 	dev->queues = kcalloc(nr_cpu_ids, sizeof(*dev->queues), GFP_KERNEL);
 	if (!dev->queues) {
-		dev->transport->free_device(dev);
+		hba->backend->ops->free_device(dev);
 		return NULL;
 	}
 
@@ -2130,7 +2130,7 @@ static int tcmu_netlink_event_send(struct tcmu_dev *udev,
 	}
 
 	ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
-				      TCMU_MCGRP_CONFIG, GFP_KERNEL);
+				      TCMU_MCGRP_CONFIG);
 
 	/* Wait during an add as the listener may not be up yet */
 	if (ret == 0 ||
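
This tracks a genetlink API change: genlmsg_multicast_allns() now walks network namespaces under RCU and allocates with GFP_ATOMIC internally, so the gfp_t argument is gone from the signature. A sketch of the updated call shape, reusing the driver's existing family and multicast group:

ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
			      TCMU_MCGRP_CONFIG);
if (ret && ret != -ESRCH)	/* -ESRCH: no listeners, usually benign */
	pr_warn("multicast failed: %d\n", ret);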
@@ -2106,6 +2106,11 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
 {
 	u32 reg;
 
+	dwc->susphy_state = (dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)) &
+			    DWC3_GUSB2PHYCFG_SUSPHY) ||
+			    (dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0)) &
+			    DWC3_GUSB3PIPECTL_SUSPHY);
+
 	switch (dwc->current_dr_role) {
 	case DWC3_GCTL_PRTCAP_DEVICE:
 		if (pm_runtime_suspended(dwc->dev))
@@ -2153,6 +2158,15 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
 		break;
 	}
 
+	if (!PMSG_IS_AUTO(msg)) {
+		/*
+		 * TI AM62 platform requires SUSPHY to be
+		 * enabled for system suspend to work.
+		 */
+		if (!dwc->susphy_state)
+			dwc3_enable_susphy(dwc, true);
+	}
+
 	return 0;
 }
 
@@ -2215,6 +2229,11 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
 		break;
 	}
 
+	if (!PMSG_IS_AUTO(msg)) {
+		/* restore SUSPHY state to that before system suspend. */
+		dwc3_enable_susphy(dwc, dwc->susphy_state);
+	}
+
 	return 0;
 }
 
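
All three dwc3 hunks key off PMSG_IS_AUTO(), which is true for runtime-PM messages and false for system sleep; the SUSPHY workaround therefore only toggles the PHY bits around system suspend and resume and leaves runtime PM alone. A minimal sketch of that split, with a hypothetical callback:

static int example_suspend(struct device *dev, pm_message_t msg)
{
	if (PMSG_IS_AUTO(msg))
		return 0;	/* runtime PM: leave the PHY config alone */

	/* system sleep: save state and apply the platform quirk */
	return 0;
}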
@@ -1127,6 +1127,8 @@ struct dwc3_scratchpad_array {
  * @sys_wakeup: set if the device may do system wakeup.
  * @wakeup_configured: set if the device is configured for remote wakeup.
  * @suspended: set to track suspend event due to U3/L2.
+ * @susphy_state: state of DWC3_GUSB2PHYCFG_SUSPHY + DWC3_GUSB3PIPECTL_SUSPHY
+ *		  before PM suspend.
  * @imod_interval: set the interrupt moderation interval in 250ns
  *			increments or 0 to disable.
  * @max_cfg_eps: current max number of IN eps used across all USB configs.
@@ -1351,6 +1353,7 @@ struct dwc3 {
 	unsigned		sys_wakeup:1;
 	unsigned		wakeup_configured:1;
 	unsigned		suspended:1;
+	unsigned		susphy_state:1;
 
 	u16			imod_interval;
 
@@ -2042,7 +2042,7 @@ static ssize_t f_uac2_opts_##name##_show(struct config_item *item, \
 	int result;							\
 									\
 	mutex_lock(&opts->lock);					\
-	result = snprintf(page, sizeof(opts->name), "%s", opts->name);	\
+	result = scnprintf(page, sizeof(opts->name), "%s", opts->name); \
 	mutex_unlock(&opts->lock);					\
 									\
 	return result;							\
@@ -2052,7 +2052,7 @@ static ssize_t f_uac2_opts_##name##_store(struct config_item *item, \
 					  const char *page, size_t len)	\
 {									\
 	struct f_uac2_opts *opts = to_f_uac2_opts(item);		\
-	int ret = 0;							\
+	int ret = len;							\
 									\
 	mutex_lock(&opts->lock);					\
 	if (opts->refcnt) {						\
@@ -2060,8 +2060,11 @@ static ssize_t f_uac2_opts_##name##_store(struct config_item *item, \
 		goto end;						\
 	}								\
 									\
-	ret = snprintf(opts->name, min(sizeof(opts->name), len),	\
-			"%s", page);					\
+	if (len && page[len - 1] == '\n')				\
+		len--;							\
+									\
+	scnprintf(opts->name, min(sizeof(opts->name), len + 1),	\
+			"%s", page);					\
 									\
 end:									\
 	mutex_unlock(&opts->lock);					\
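
The snprintf()-to-scnprintf() swap matters because the two differ in their return value on truncation: snprintf() reports the length the output would have needed, while scnprintf() (kernel lib/vsprintf.c) reports the bytes actually written, which is what a configfs show/store must return. A userspace illustration of the snprintf() half:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[8];
	int would_be = snprintf(buf, sizeof(buf), "0123456789");

	/* prints: snprintf returned 10, but only 7 chars stored */
	printf("snprintf returned %d, but only %zu chars stored\n",
	       would_be, strlen(buf));
	return 0;
}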
@@ -2178,7 +2181,7 @@ static struct usb_function_instance *afunc_alloc_inst(void)
 	opts->req_number = UAC2_DEF_REQ_NUM;
 	opts->fb_max = FBACK_FAST_MAX;
 
-	snprintf(opts->function_name, sizeof(opts->function_name), "Source/Sink");
+	scnprintf(opts->function_name, sizeof(opts->function_name), "Source/Sink");
 
 	return &opts->func_inst;
 }
drivers/usb/host/xhci-caps.h (new file, 85 lines)
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* hc_capbase bitmasks */
+/* bits 7:0 - how long is the Capabilities register */
+#define HC_LENGTH(p)		XHCI_HC_LENGTH(p)
+/* bits 31:16 */
+#define HC_VERSION(p)		(((p) >> 16) & 0xffff)
+
+/* HCSPARAMS1 - hcs_params1 - bitmasks */
+/* bits 0:7, Max Device Slots */
+#define HCS_MAX_SLOTS(p)	(((p) >> 0) & 0xff)
+#define HCS_SLOTS_MASK		0xff
+/* bits 8:18, Max Interrupters */
+#define HCS_MAX_INTRS(p)	(((p) >> 8) & 0x7ff)
+/* bits 24:31, Max Ports - max value is 0x7F = 127 ports */
+#define HCS_MAX_PORTS(p)	(((p) >> 24) & 0x7f)
+
+/* HCSPARAMS2 - hcs_params2 - bitmasks */
+/* bits 0:3, frames or uframes that SW needs to queue transactions
+ * ahead of the HW to meet periodic deadlines */
+#define HCS_IST(p)		(((p) >> 0) & 0xf)
+/* bits 4:7, max number of Event Ring segments */
+#define HCS_ERST_MAX(p)		(((p) >> 4) & 0xf)
+/* bits 21:25 Hi 5 bits of Scratchpad buffers SW must allocate for the HW */
+/* bit 26 Scratchpad restore - for save/restore HW state - not used yet */
+/* bits 27:31 Lo 5 bits of Scratchpad buffers SW must allocate for the HW */
+#define HCS_MAX_SCRATCHPAD(p)	((((p) >> 16) & 0x3e0) | (((p) >> 27) & 0x1f))
+
+/* HCSPARAMS3 - hcs_params3 - bitmasks */
+/* bits 0:7, Max U1 to U0 latency for the roothub ports */
+#define HCS_U1_LATENCY(p)	(((p) >> 0) & 0xff)
+/* bits 16:31, Max U2 to U0 latency for the roothub ports */
+#define HCS_U2_LATENCY(p)	(((p) >> 16) & 0xffff)
+
+/* HCCPARAMS - hcc_params - bitmasks */
+/* true: HC can use 64-bit address pointers */
+#define HCC_64BIT_ADDR(p)	((p) & (1 << 0))
+/* true: HC can do bandwidth negotiation */
+#define HCC_BANDWIDTH_NEG(p)	((p) & (1 << 1))
+/* true: HC uses 64-byte Device Context structures
+ * FIXME 64-byte context structures aren't supported yet.
+ */
+#define HCC_64BYTE_CONTEXT(p)	((p) & (1 << 2))
+/* true: HC has port power switches */
+#define HCC_PPC(p)		((p) & (1 << 3))
+/* true: HC has port indicators */
+#define HCS_INDICATOR(p)	((p) & (1 << 4))
+/* true: HC has Light HC Reset Capability */
+#define HCC_LIGHT_RESET(p)	((p) & (1 << 5))
+/* true: HC supports latency tolerance messaging */
+#define HCC_LTC(p)		((p) & (1 << 6))
+/* true: no secondary Stream ID Support */
+#define HCC_NSS(p)		((p) & (1 << 7))
+/* true: HC supports Stopped - Short Packet */
+#define HCC_SPC(p)		((p) & (1 << 9))
+/* true: HC has Contiguous Frame ID Capability */
+#define HCC_CFC(p)		((p) & (1 << 11))
+/* Max size for Primary Stream Arrays - 2^(n+1), where n is bits 12:15 */
+#define HCC_MAX_PSA(p)		(1 << ((((p) >> 12) & 0xf) + 1))
+/* Extended Capabilities pointer from PCI base - section 5.3.6 */
+#define HCC_EXT_CAPS(p)		XHCI_HCC_EXT_CAPS(p)
+
+#define CTX_SIZE(_hcc)		(HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
+
+/* db_off bitmask - bits 0:1 reserved */
+#define DBOFF_MASK	(~0x3)
+
+/* run_regs_off bitmask - bits 0:4 reserved */
+#define RTSOFF_MASK	(~0x1f)
+
+/* HCCPARAMS2 - hcc_params2 - bitmasks */
+/* true: HC supports U3 entry Capability */
+#define HCC2_U3C(p)		((p) & (1 << 0))
+/* true: HC supports Configure endpoint command Max exit latency too large */
+#define HCC2_CMC(p)		((p) & (1 << 1))
+/* true: HC supports Force Save context Capability */
+#define HCC2_FSC(p)		((p) & (1 << 2))
+/* true: HC supports Compliance Transition Capability */
+#define HCC2_CTC(p)		((p) & (1 << 3))
+/* true: HC support Large ESIT payload Capability > 48k */
+#define HCC2_LEC(p)		((p) & (1 << 4))
+/* true: HC support Configuration Information Capability */
+#define HCC2_CIC(p)		((p) & (1 << 5))
+/* true: HC support Extended TBC Capability, Isoc burst count > 65535 */
+#define HCC2_ETC(p)		((p) & (1 << 6))
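
As a quick sanity check of the packed scratchpad encoding, here is a userspace toy that decodes HCSPARAMS2 and HCCPARAMS values with two of the macros above (copied locally; the register values are made up):

#include <stdio.h>

#define HCS_MAX_SCRATCHPAD(p)	((((p) >> 16) & 0x3e0) | (((p) >> 27) & 0x1f))
#define HCC_MAX_PSA(p)		(1 << ((((p) >> 12) & 0xf) + 1))

int main(void)
{
	unsigned int hcs_params2 = 0x10000000;	/* low field (bits 27:31) = 2 */
	unsigned int hcc_params  = 0x00003000;	/* bits 12:15 = 3 */

	printf("scratchpad buffers: %u\n", HCS_MAX_SCRATCHPAD(hcs_params2)); /* 2 */
	printf("max primary stream array size: %d\n", HCC_MAX_PSA(hcc_params)); /* 16 */
	return 0;
}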
@@ -108,7 +108,7 @@ struct dbc_port {
 	struct tasklet_struct		push;
 
 	struct list_head		write_pool;
-	struct kfifo			write_fifo;
+	unsigned int			tx_boundary;
 
 	bool				registered;
 };
@@ -25,16 +25,26 @@ static inline struct dbc_port *dbc_to_port(struct xhci_dbc *dbc)
 }
 
 static unsigned int
-dbc_send_packet(struct dbc_port *port, char *packet, unsigned int size)
+dbc_kfifo_to_req(struct dbc_port *port, char *packet)
 {
-	unsigned int	len;
-
-	len = kfifo_len(&port->write_fifo);
-	if (len < size)
-		size = len;
-	if (size != 0)
-		size = kfifo_out(&port->write_fifo, packet, size);
-	return size;
+	unsigned int	len;
+
+	len = kfifo_len(&port->port.xmit_fifo);
+
+	if (len == 0)
+		return 0;
+
+	len = min(len, DBC_MAX_PACKET);
+
+	if (port->tx_boundary)
+		len = min(port->tx_boundary, len);
+
+	len = kfifo_out(&port->port.xmit_fifo, packet, len);
+
+	if (port->tx_boundary)
+		port->tx_boundary -= len;
+
+	return len;
 }
 
 static int dbc_start_tx(struct dbc_port *port)
@@ -49,7 +59,7 @@ static int dbc_start_tx(struct dbc_port *port)
 
 	while (!list_empty(pool)) {
 		req = list_entry(pool->next, struct dbc_request, list_pool);
-		len = dbc_send_packet(port, req->buf, DBC_MAX_PACKET);
+		len = dbc_kfifo_to_req(port, req->buf);
 		if (len == 0)
 			break;
 		do_tty_wake = true;
@@ -213,14 +223,32 @@ static ssize_t dbc_tty_write(struct tty_struct *tty, const u8 *buf,
 {
 	struct dbc_port *port = tty->driver_data;
 	unsigned long flags;
+	unsigned int written = 0;
 
 	spin_lock_irqsave(&port->port_lock, flags);
-	if (count)
-		count = kfifo_in(&port->write_fifo, buf, count);
-	dbc_start_tx(port);
+
+	/*
+	 * Treat tty write as one usb transfer. Make sure the writes are turned
+	 * into TRB request having the same size boundaries as the tty writes.
+	 * Don't add data to kfifo before previous write is turned into TRBs
+	 */
+	if (port->tx_boundary) {
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		return 0;
+	}
+
+	if (count) {
+		written = kfifo_in(&port->port.xmit_fifo, buf, count);
+
+		if (written == count)
+			port->tx_boundary = kfifo_len(&port->port.xmit_fifo);
+
+		dbc_start_tx(port);
+	}
+
 	spin_unlock_irqrestore(&port->port_lock, flags);
 
-	return count;
+	return written;
 }
 
 static int dbc_tty_put_char(struct tty_struct *tty, u8 ch)
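
The tx_boundary counter introduced above makes each tty write drain as a run of requests that never crosses into the next write. A userspace toy of that chunking logic, with DBC_MAX_PACKET assumed to be 1024 here:

#include <stdio.h>

#define DBC_MAX_PACKET 1024	/* assumed max request size */

static unsigned int boundary;	/* bytes of the current write still queued */

static unsigned int next_chunk(unsigned int queued)
{
	unsigned int len = queued < DBC_MAX_PACKET ? queued : DBC_MAX_PACKET;

	if (boundary && boundary < len)
		len = boundary;
	if (boundary)
		boundary -= len;
	return len;
}

int main(void)
{
	unsigned int queued = 2500, len;

	boundary = 2500;	/* one tty write of 2500 bytes */
	while (queued && (len = next_chunk(queued))) {
		printf("request of %u bytes\n", len);	/* 1024, 1024, 452 */
		queued -= len;
	}
	return 0;
}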
@@ -230,7 +258,7 @@ static int dbc_tty_put_char(struct tty_struct *tty, u8 ch)
 	int status;
 
 	spin_lock_irqsave(&port->port_lock, flags);
-	status = kfifo_put(&port->write_fifo, ch);
+	status = kfifo_put(&port->port.xmit_fifo, ch);
 	spin_unlock_irqrestore(&port->port_lock, flags);
 
 	return status;
@@ -253,7 +281,11 @@ static unsigned int dbc_tty_write_room(struct tty_struct *tty)
 	unsigned int room;
 
 	spin_lock_irqsave(&port->port_lock, flags);
-	room = kfifo_avail(&port->write_fifo);
+	room = kfifo_avail(&port->port.xmit_fifo);
+
+	if (port->tx_boundary)
+		room = 0;
+
 	spin_unlock_irqrestore(&port->port_lock, flags);
 
 	return room;
@@ -266,7 +298,7 @@ static unsigned int dbc_tty_chars_in_buffer(struct tty_struct *tty)
 	unsigned int chars;
 
 	spin_lock_irqsave(&port->port_lock, flags);
-	chars = kfifo_len(&port->write_fifo);
+	chars = kfifo_len(&port->port.xmit_fifo);
 	spin_unlock_irqrestore(&port->port_lock, flags);
 
 	return chars;
@@ -424,7 +456,8 @@ static int xhci_dbc_tty_register_device(struct xhci_dbc *dbc)
 		goto err_idr;
 	}
 
-	ret = kfifo_alloc(&port->write_fifo, DBC_WRITE_BUF_SIZE, GFP_KERNEL);
+	ret = kfifo_alloc(&port->port.xmit_fifo, DBC_WRITE_BUF_SIZE,
+			  GFP_KERNEL);
 	if (ret)
 		goto err_exit_port;
 
@@ -453,7 +486,7 @@ err_free_requests:
 	xhci_dbc_free_requests(&port->read_pool);
 	xhci_dbc_free_requests(&port->write_pool);
 err_free_fifo:
-	kfifo_free(&port->write_fifo);
+	kfifo_free(&port->port.xmit_fifo);
 err_exit_port:
 	idr_remove(&dbc_tty_minors, port->minor);
 err_idr:
@@ -478,7 +511,7 @@ static void xhci_dbc_tty_unregister_device(struct xhci_dbc *dbc)
 	idr_remove(&dbc_tty_minors, port->minor);
 	mutex_unlock(&dbc_tty_minors_lock);
 
-	kfifo_free(&port->write_fifo);
+	kfifo_free(&port->port.xmit_fifo);
 	xhci_dbc_free_requests(&port->read_pool);
 	xhci_dbc_free_requests(&port->read_queue);
 	xhci_dbc_free_requests(&port->write_pool);
drivers/usb/host/xhci-port.h (new file, 176 lines)
@@ -0,0 +1,176 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* PORTSC - Port Status and Control Register - port_status_base bitmasks */
+/* true: device connected */
+#define PORT_CONNECT	(1 << 0)
+/* true: port enabled */
+#define PORT_PE		(1 << 1)
+/* bit 2 reserved and zeroed */
+/* true: port has an over-current condition */
+#define PORT_OC		(1 << 3)
+/* true: port reset signaling asserted */
+#define PORT_RESET	(1 << 4)
+/* Port Link State - bits 5:8
+ * A read gives the current link PM state of the port,
+ * a write with Link State Write Strobe set sets the link state.
+ */
+#define PORT_PLS_MASK	(0xf << 5)
+#define XDEV_U0		(0x0 << 5)
+#define XDEV_U1		(0x1 << 5)
+#define XDEV_U2		(0x2 << 5)
+#define XDEV_U3		(0x3 << 5)
+#define XDEV_DISABLED	(0x4 << 5)
+#define XDEV_RXDETECT	(0x5 << 5)
+#define XDEV_INACTIVE	(0x6 << 5)
+#define XDEV_POLLING	(0x7 << 5)
+#define XDEV_RECOVERY	(0x8 << 5)
+#define XDEV_HOT_RESET	(0x9 << 5)
+#define XDEV_COMP_MODE	(0xa << 5)
+#define XDEV_TEST_MODE	(0xb << 5)
+#define XDEV_RESUME	(0xf << 5)
+
+/* true: port has power (see HCC_PPC) */
+#define PORT_POWER	(1 << 9)
+/* bits 10:13 indicate device speed:
+ * 0 - undefined speed - port hasn't be initialized by a reset yet
+ * 1 - full speed
+ * 2 - low speed
+ * 3 - high speed
+ * 4 - super speed
+ * 5-15 reserved
+ */
+#define DEV_SPEED_MASK		(0xf << 10)
+#define XDEV_FS			(0x1 << 10)
+#define XDEV_LS			(0x2 << 10)
+#define XDEV_HS			(0x3 << 10)
+#define XDEV_SS			(0x4 << 10)
+#define XDEV_SSP		(0x5 << 10)
+#define DEV_UNDEFSPEED(p)	(((p) & DEV_SPEED_MASK) == (0x0<<10))
+#define DEV_FULLSPEED(p)	(((p) & DEV_SPEED_MASK) == XDEV_FS)
+#define DEV_LOWSPEED(p)		(((p) & DEV_SPEED_MASK) == XDEV_LS)
+#define DEV_HIGHSPEED(p)	(((p) & DEV_SPEED_MASK) == XDEV_HS)
+#define DEV_SUPERSPEED(p)	(((p) & DEV_SPEED_MASK) == XDEV_SS)
+#define DEV_SUPERSPEEDPLUS(p)	(((p) & DEV_SPEED_MASK) == XDEV_SSP)
+#define DEV_SUPERSPEED_ANY(p)	(((p) & DEV_SPEED_MASK) >= XDEV_SS)
+#define DEV_PORT_SPEED(p)	(((p) >> 10) & 0x0f)
+
+/* Bits 20:23 in the Slot Context are the speed for the device */
+#define SLOT_SPEED_FS		(XDEV_FS << 10)
+#define SLOT_SPEED_LS		(XDEV_LS << 10)
+#define SLOT_SPEED_HS		(XDEV_HS << 10)
+#define SLOT_SPEED_SS		(XDEV_SS << 10)
+#define SLOT_SPEED_SSP		(XDEV_SSP << 10)
+/* Port Indicator Control */
+#define PORT_LED_OFF	(0 << 14)
+#define PORT_LED_AMBER	(1 << 14)
+#define PORT_LED_GREEN	(2 << 14)
+#define PORT_LED_MASK	(3 << 14)
+/* Port Link State Write Strobe - set this when changing link state */
+#define PORT_LINK_STROBE	(1 << 16)
+/* true: connect status change */
+#define PORT_CSC	(1 << 17)
+/* true: port enable change */
+#define PORT_PEC	(1 << 18)
+/* true: warm reset for a USB 3.0 device is done. A "hot" reset puts the port
+ * into an enabled state, and the device into the default state. A "warm" reset
+ * also resets the link, forcing the device through the link training sequence.
+ * SW can also look at the Port Reset register to see when warm reset is done.
+ */
+#define PORT_WRC	(1 << 19)
+/* true: over-current change */
+#define PORT_OCC	(1 << 20)
+/* true: reset change - 1 to 0 transition of PORT_RESET */
+#define PORT_RC		(1 << 21)
+/* port link status change - set on some port link state transitions:
+ *  Transition			Reason
+ *  ------------------------------------------------------------------------------
+ *  - U3 to Resume		Wakeup signaling from a device
+ *  - Resume to Recovery to U0	USB 3.0 device resume
+ *  - Resume to U0		USB 2.0 device resume
+ *  - U3 to Recovery to U0	Software resume of USB 3.0 device complete
+ *  - U3 to U0			Software resume of USB 2.0 device complete
+ *  - U2 to U0			L1 resume of USB 2.1 device complete
+ *  - U0 to U0 (???)		L1 entry rejection by USB 2.1 device
+ *  - U0 to disabled		L1 entry error with USB 2.1 device
+ *  - Any state to inactive	Error on USB 3.0 port
+ */
+#define PORT_PLC	(1 << 22)
+/* port configure error change - port failed to configure its link partner */
+#define PORT_CEC	(1 << 23)
+#define PORT_CHANGE_MASK	(PORT_CSC | PORT_PEC | PORT_WRC | PORT_OCC | \
+				 PORT_RC | PORT_PLC | PORT_CEC)
+
+
+/* Cold Attach Status - xHC can set this bit to report device attached during
+ * Sx state. Warm port reset should be perfomed to clear this bit and move port
+ * to connected state.
+ */
+#define PORT_CAS	(1 << 24)
+/* wake on connect (enable) */
+#define PORT_WKCONN_E	(1 << 25)
+/* wake on disconnect (enable) */
+#define PORT_WKDISC_E	(1 << 26)
+/* wake on over-current (enable) */
+#define PORT_WKOC_E	(1 << 27)
+/* bits 28:29 reserved */
+/* true: device is non-removable - for USB 3.0 roothub emulation */
+#define PORT_DEV_REMOVE	(1 << 30)
+/* Initiate a warm port reset - complete when PORT_WRC is '1' */
+#define PORT_WR		(1 << 31)
+
+/* We mark duplicate entries with -1 */
+#define DUPLICATE_ENTRY ((u8)(-1))
+
+/* Port Power Management Status and Control - port_power_base bitmasks */
+/* Inactivity timer value for transitions into U1, in microseconds.
+ * Timeout can be up to 127us. 0xFF means an infinite timeout.
+ */
+#define PORT_U1_TIMEOUT(p)	((p) & 0xff)
+#define PORT_U1_TIMEOUT_MASK	0xff
+/* Inactivity timer value for transitions into U2 */
+#define PORT_U2_TIMEOUT(p)	(((p) & 0xff) << 8)
+#define PORT_U2_TIMEOUT_MASK	(0xff << 8)
+/* Bits 24:31 for port testing */
+
+/* USB2 Protocol PORTSPMSC */
+#define PORT_L1S_MASK		7
+#define PORT_L1S_SUCCESS	1
+#define PORT_RWE		(1 << 3)
+#define PORT_HIRD(p)		(((p) & 0xf) << 4)
+#define PORT_HIRD_MASK		(0xf << 4)
+#define PORT_L1DS_MASK		(0xff << 8)
+#define PORT_L1DS(p)		(((p) & 0xff) << 8)
+#define PORT_HLE		(1 << 16)
+#define PORT_TEST_MODE_SHIFT	28
+
+/* USB3 Protocol PORTLI  Port Link Information */
+#define PORT_RX_LANES(p)	(((p) >> 16) & 0xf)
+#define PORT_TX_LANES(p)	(((p) >> 20) & 0xf)
+
+/* USB2 Protocol PORTHLPMC */
+#define PORT_HIRDM(p)((p) & 3)
+#define PORT_L1_TIMEOUT(p)(((p) & 0xff) << 2)
+#define PORT_BESLD(p)(((p) & 0xf) << 10)
+
+/* use 512 microseconds as USB2 LPM L1 default timeout. */
+#define XHCI_L1_TIMEOUT		512
+
+/* Set default HIRD/BESL value to 4 (350/400us) for USB2 L1 LPM resume latency.
+ * Safe to use with mixed HIRD and BESL systems (host and device) and is used
+ * by other operating systems.
+ *
+ * XHCI 1.0 errata 8/14/12 Table 13 notes:
+ * "Software should choose xHC BESL/BESLD field values that do not violate a
+ * device's resume latency requirements,
+ * e.g. not program values > '4' if BLC = '1' and a HIRD device is attached,
+ * or not program values < '4' if BLC = '0' and a BESL device is attached.
+ */
+#define XHCI_DEFAULT_BESL	4
+
+/*
+ * USB3 specification define a 360ms tPollingLFPSTiemout for USB3 ports
+ * to complete link training. usually link trainig completes much faster
+ * so check status 10 times with 36ms sleep in places we need to wait for
+ * polling to complete.
+ */
+#define XHCI_PORT_POLLING_LFPS_TIME  36
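
To see the PORTSC fields in action, here is a userspace toy that decodes a made-up register snapshot with a few of the masks above (copied locally):

#include <stdio.h>

#define PORT_CONNECT	(1 << 0)
#define PORT_PE		(1 << 1)
#define PORT_PLS_MASK	(0xf << 5)
#define XDEV_U0		(0x0 << 5)
#define DEV_PORT_SPEED(p)	(((p) >> 10) & 0x0f)

int main(void)
{
	unsigned int portsc = 0x1003;	/* connected, enabled, U0, speed 4 */

	printf("connected=%d enabled=%d in_u0=%d speed=%u (4 = SuperSpeed)\n",
	       !!(portsc & PORT_CONNECT), !!(portsc & PORT_PE),
	       (portsc & PORT_PLS_MASK) == XDEV_U0,
	       DEV_PORT_SPEED(portsc));
	return 0;
}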