mirror of git://git.yoctoproject.org/linux-yocto.git

Merge branch 'v6.5/standard/base' into v6.5/standard/arm-versatile-926ejs

commit 3fdf15eaa1
@@ -2938,6 +2938,10 @@
 	locktorture.torture_type= [KNL]
 			Specify the locking implementation to test.

+	locktorture.writer_fifo= [KNL]
+			Run the write-side locktorture kthreads at
+			sched_set_fifo() real-time priority.
+
 	locktorture.verbose= [KNL]
 			Enable additional printk() statements.
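As a usage sketch (parameter values are illustrative, and this assumes locktorture is built as a module), the new writer_fifo flag combines with the existing options::

    # run a mutex torture test with RT-priority writers for a minute
    modprobe locktorture torture_type=mutex_lock writer_fifo=1 verbose=1
    sleep 60
    rmmod locktorture        # the summary is printed to the kernel log
    dmesg | tail -n 20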
@@ -5781,6 +5785,13 @@
 			This feature may be more efficiently disabled
 			using the csdlock_debug- kernel parameter.

+	smp.panic_on_ipistall= [KNL]
+			If a csd_lock_timeout extends for more than
+			the specified number of milliseconds, panic the
+			system. By default, let CSD-lock acquisition
+			take as long as it takes. Specifying 300,000
+			for this value provides a 5-minute timeout.
+
 	smsc-ircc2.nopnp	[HW] Don't use PNP to discover SMC devices
 	smsc-ircc2.ircc_cfg=	[HW] Device configuration I/O port
 	smsc-ircc2.ircc_sir=	[HW] SIR base I/O port
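A minimal wiring sketch (the boot entry shown is illustrative): append the parameter to the kernel command line in the bootloader, then confirm it after boot::

    # bootloader entry: ... smp.panic_on_ipistall=300000   (5-minute timeout)
    tr ' ' '\n' < /proc/cmdline | grep panic_on_ipistall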
@@ -27,6 +27,27 @@ properties:

   vdd3-supply: true

+  qcom,tune-usb2-disc-thres:
+    $ref: /schemas/types.yaml#/definitions/uint8
+    description: High-Speed disconnect threshold
+    minimum: 0
+    maximum: 7
+    default: 0
+
+  qcom,tune-usb2-amplitude:
+    $ref: /schemas/types.yaml#/definitions/uint8
+    description: High-Speed transmit amplitude
+    minimum: 0
+    maximum: 15
+    default: 8
+
+  qcom,tune-usb2-preem:
+    $ref: /schemas/types.yaml#/definitions/uint8
+    description: High-Speed TX pre-emphasis tuning
+    minimum: 0
+    maximum: 7
+    default: 5
+
 required:
   - compatible
   - reg
@@ -96,7 +96,7 @@ then:
     rts-gpios: false

 patternProperties:
-  "^bluetooth|gnss|gps|mcu$":
+  "^(bluetooth|gnss|gps|mcu)$":
     if:
       type: object
     then:
@@ -169,27 +169,27 @@ properties:
           - const: tgib0
           - const: tgic0
           - const: tgid0
-          - const: tgiv0
+          - const: tciv0
           - const: tgie0
           - const: tgif0
           - const: tgia1
           - const: tgib1
-          - const: tgiv1
-          - const: tgiu1
+          - const: tciv1
+          - const: tciu1
           - const: tgia2
           - const: tgib2
-          - const: tgiv2
-          - const: tgiu2
+          - const: tciv2
+          - const: tciu2
           - const: tgia3
           - const: tgib3
           - const: tgic3
           - const: tgid3
-          - const: tgiv3
+          - const: tciv3
           - const: tgia4
           - const: tgib4
           - const: tgic4
           - const: tgid4
-          - const: tgiv4
+          - const: tciv4
           - const: tgiu5
           - const: tgiv5
           - const: tgiw5
@@ -197,18 +197,18 @@ properties:
           - const: tgib6
           - const: tgic6
           - const: tgid6
-          - const: tgiv6
+          - const: tciv6
           - const: tgia7
           - const: tgib7
           - const: tgic7
           - const: tgid7
-          - const: tgiv7
+          - const: tciv7
           - const: tgia8
           - const: tgib8
           - const: tgic8
           - const: tgid8
-          - const: tgiv8
-          - const: tgiu8
+          - const: tciv8
+          - const: tciu8

   clocks:
     maxItems: 1
@@ -285,16 +285,16 @@ examples:
                      <GIC_SPI 211 IRQ_TYPE_EDGE_RISING>,
                      <GIC_SPI 212 IRQ_TYPE_EDGE_RISING>,
                      <GIC_SPI 213 IRQ_TYPE_EDGE_RISING>;
-        interrupt-names = "tgia0", "tgib0", "tgic0", "tgid0", "tgiv0", "tgie0",
+        interrupt-names = "tgia0", "tgib0", "tgic0", "tgid0", "tciv0", "tgie0",
                           "tgif0",
-                          "tgia1", "tgib1", "tgiv1", "tgiu1",
-                          "tgia2", "tgib2", "tgiv2", "tgiu2",
-                          "tgia3", "tgib3", "tgic3", "tgid3", "tgiv3",
-                          "tgia4", "tgib4", "tgic4", "tgid4", "tgiv4",
+                          "tgia1", "tgib1", "tciv1", "tciu1",
+                          "tgia2", "tgib2", "tciv2", "tciu2",
+                          "tgia3", "tgib3", "tgic3", "tgid3", "tciv3",
+                          "tgia4", "tgib4", "tgic4", "tgid4", "tciv4",
                           "tgiu5", "tgiv5", "tgiw5",
-                          "tgia6", "tgib6", "tgic6", "tgid6", "tgiv6",
-                          "tgia7", "tgib7", "tgic7", "tgid7", "tgiv7",
-                          "tgia8", "tgib8", "tgic8", "tgid8", "tgiv8", "tgiu8";
+                          "tgia6", "tgib6", "tgic6", "tgid6", "tciv6",
+                          "tgia7", "tgib7", "tgic7", "tgid7", "tciv7",
+                          "tgia8", "tgib8", "tgic8", "tgid8", "tciv8", "tciu8";
         clocks = <&cpg CPG_MOD R9A07G044_MTU_X_MCK_MTU3>;
         power-domains = <&cpg>;
         resets = <&cpg R9A07G044_MTU_X_PRESET_MTU3>;
@@ -47,6 +47,7 @@ Supported adapters:
   * Intel Alder Lake (PCH)
   * Intel Raptor Lake (PCH)
   * Intel Meteor Lake (SOC and PCH)
+  * Intel Birch Stream (SOC)

 Datasheets: Publicly available at the Intel website
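Assuming the driver is built as a module, binding against one of the adapters listed above can be confirmed from userspace (i2cdetect is part of i2c-tools; the adapter name string may vary by platform)::

    modprobe i2c-i801
    i2cdetect -l | grep -i 'I801'    # lists the SMBus I801 adapter, if bound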
@@ -683,6 +683,12 @@ the software port.
         time protocol.
      - Error

+   * - `ptp_cq[i]_late_cqe`
+     - Number of times a CQE has been delivered on the PTP timestamping CQ when
+       the CQE was not expected since a certain amount of time had elapsed where
+       the device typically ensures not posting the CQE.
+     - Error
+
 .. [#ring_global] The corresponding ring and global counters do not share the
    same name (i.e. do not follow the common naming scheme).
@@ -1,313 +0,0 @@
.. SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
.. include:: <isonum.txt>

=======
Devlink
=======

:Copyright: |copy| 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.

Contents
========

- `Info`_
- `Parameters`_
- `Health reporters`_

Info
====

The devlink info reports the running and stored firmware versions on device.
It also prints the device PSID which represents the HCA board type ID.

User command example::

    $ devlink dev info pci/0000:00:06.0
      pci/0000:00:06.0:
      driver mlx5_core
      versions:
          fixed:
            fw.psid MT_0000000009
          running:
            fw.version 16.26.0100
          stored:
            fw.version 16.26.0100

Parameters
==========

flow_steering_mode: Device flow steering mode
---------------------------------------------
The flow steering mode parameter controls the flow steering mode of the driver.
Two modes are supported:

1. 'dmfs' - Device managed flow steering.
2. 'smfs' - Software/Driver managed flow steering.

In DMFS mode, the HW steering entities are created and managed through the
firmware.
In SMFS mode, the HW steering entities are created and managed by the driver
directly, without firmware intervention.

SMFS mode is faster and provides better rule insertion rate compared to the
default DMFS mode.

User command examples:

- Set SMFS flow steering mode::

    $ devlink dev param set pci/0000:06:00.0 name flow_steering_mode value "smfs" cmode runtime

- Read device flow steering mode::

    $ devlink dev param show pci/0000:06:00.0 name flow_steering_mode
      pci/0000:06:00.0:
        name flow_steering_mode type driver-specific
        values:
          cmode runtime value smfs

enable_roce: RoCE enablement state
----------------------------------
If the device supports RoCE disablement, RoCE enablement state controls device
support for RoCE capability. Otherwise, the control occurs in the driver stack.
When RoCE is disabled at the driver level, only raw ethernet QPs are supported.

To change RoCE enablement state, a user must change the driverinit cmode value
and run devlink reload.

User command examples:

- Disable RoCE::

    $ devlink dev param set pci/0000:06:00.0 name enable_roce value false cmode driverinit
    $ devlink dev reload pci/0000:06:00.0

- Read RoCE enablement state::

    $ devlink dev param show pci/0000:06:00.0 name enable_roce
      pci/0000:06:00.0:
        name enable_roce type generic
        values:
          cmode driverinit value true

esw_port_metadata: Eswitch port metadata state
----------------------------------------------
When applicable, disabling eswitch metadata can increase packet rate
up to 20% depending on the use case and packet sizes.

Eswitch port metadata state controls whether to internally tag packets with
metadata. Metadata tagging must be enabled for multi-port RoCE, failover
between representors and stacked devices.
By default metadata is enabled on the supported devices in E-switch.
Metadata is applicable only for E-switch in switchdev mode and
users may disable it when NONE of the below use cases will be in use:

1. HCA is in Dual/multi-port RoCE mode.
2. VF/SF representor bonding (usually used for live migration)
3. Stacked devices

When metadata is disabled, the above use cases will fail to initialize if
users try to enable them.

- Show eswitch port metadata::

    $ devlink dev param show pci/0000:06:00.0 name esw_port_metadata
      pci/0000:06:00.0:
        name esw_port_metadata type driver-specific
        values:
          cmode runtime value true

- Disable eswitch port metadata::

    $ devlink dev param set pci/0000:06:00.0 name esw_port_metadata value false cmode runtime

- Change eswitch mode to switchdev mode, after choosing the metadata value::

    $ devlink dev eswitch set pci/0000:06:00.0 mode switchdev

hairpin_num_queues: Number of hairpin queues
--------------------------------------------
We refer to a TC NIC rule that involves forwarding as "hairpin".

Hairpin queues are an mlx5 hardware specific implementation for hardware
forwarding of such packets.

- Show the number of hairpin queues::

    $ devlink dev param show pci/0000:06:00.0 name hairpin_num_queues
      pci/0000:06:00.0:
        name hairpin_num_queues type driver-specific
        values:
          cmode driverinit value 2

- Change the number of hairpin queues::

    $ devlink dev param set pci/0000:06:00.0 name hairpin_num_queues value 4 cmode driverinit

hairpin_queue_size: Size of the hairpin queues
----------------------------------------------
Control the size of the hairpin queues.

- Show the size of the hairpin queues::

    $ devlink dev param show pci/0000:06:00.0 name hairpin_queue_size
      pci/0000:06:00.0:
        name hairpin_queue_size type driver-specific
        values:
          cmode driverinit value 1024

- Change the size (in packets) of the hairpin queues::

    $ devlink dev param set pci/0000:06:00.0 name hairpin_queue_size value 512 cmode driverinit

Health reporters
================

tx reporter
-----------
The tx reporter is responsible for reporting and recovering of the following two error scenarios:

- tx timeout
    Report on kernel tx timeout detection.
    Recover by searching lost interrupts.
- tx error completion
    Report on error tx completion.
    Recover by flushing the tx queue and resetting it.

The tx reporter also supports an on-demand diagnose callback, through which it
provides real-time information about the status of its send queues.

User commands examples:

- Diagnose send queues status::

    $ devlink health diagnose pci/0000:82:00.0 reporter tx

.. note::
   This command has valid output only when the interface is up; otherwise the
   command has empty output.

- Show the number of tx errors indicated, the number of recover flows that
  ended successfully, whether auto-recover is enabled, and the grace period
  since the last recovery::

    $ devlink health show pci/0000:82:00.0 reporter tx

rx reporter
-----------
The rx reporter is responsible for reporting and recovering of the following two error scenarios:

- rx queues' initialization (population) timeout
    Population of rx queues' descriptors on ring initialization is done
    in napi context via triggering an irq. In case of a failure to get
    the minimum amount of descriptors, a timeout would occur, and
    descriptors could be recovered by polling the EQ (Event Queue).
- rx completions with errors (reported by HW on interrupt context)
    Report on rx completion error.
    Recover (if needed) by flushing the related queue and resetting it.

The rx reporter also supports an on-demand diagnose callback, through which it
provides real-time information about the status of its receive queues.

- Diagnose rx queues' status and the corresponding completion queue::

    $ devlink health diagnose pci/0000:82:00.0 reporter rx

.. note::
   This command has valid output only when the interface is up; otherwise the
   command has empty output.

- Show the number of rx errors indicated, the number of recover flows that
  ended successfully, whether auto-recover is enabled, and the grace period
  since the last recovery::

    $ devlink health show pci/0000:82:00.0 reporter rx

fw reporter
-----------
The fw reporter implements `diagnose` and `dump` callbacks.
It follows symptoms of fw error such as fw syndrome by triggering
a fw core dump and storing it into the dump buffer.
The fw reporter diagnose command can be triggered any time by the user to check
current fw status.

User commands examples:

- Check fw health status::

    $ devlink health diagnose pci/0000:82:00.0 reporter fw

- Read the FW core dump if already stored, or trigger a new one::

    $ devlink health dump show pci/0000:82:00.0 reporter fw

.. note::
   This command can run only on the PF which has fw tracer ownership;
   running it on another PF or any VF will return "Operation not permitted".

fw fatal reporter
-----------------
The fw fatal reporter implements `dump` and `recover` callbacks.
It follows fatal error indications by CR-space dump and a recover flow.
The CR-space dump uses the vsc interface, which is valid even if the FW command
interface is not functional, which is the case in most FW fatal errors.
The recover function runs a recover flow which reloads the driver and triggers a
fw reset if needed.
On firmware error, the health buffer is dumped into the dmesg. The log
level is derived from the error's severity (given in the health buffer).

User commands examples:

- Run the fw recover flow manually::

    $ devlink health recover pci/0000:82:00.0 reporter fw_fatal

- Read the FW CR-space dump if already stored, or trigger a new one::

    $ devlink health dump show pci/0000:82:00.1 reporter fw_fatal

.. note::
   This command can run only on the PF.

vnic reporter
-------------
The vnic reporter implements only the `diagnose` callback.
It is responsible for querying the vnic diagnostic counters from fw and
displaying them in realtime.

Description of the vnic counters:

- total_q_under_processor_handle
        number of queues in an error state due to
        an async error or errored command.
- send_queue_priority_update_flow
        number of QP/SQ priority/SL update events.
- cq_overrun
        number of times CQ entered an error state due to an overflow.
- async_eq_overrun
        number of times an EQ mapped to async events was overrun.
- comp_eq_overrun
        number of times an EQ mapped to completion events was overrun.
- quota_exceeded_command
        number of commands issued and failed due to quota exceeded.
- invalid_command
        number of commands issued and failed due to any reason other than
        quota exceeded.
- nic_receive_steering_discard
        number of packets that completed RX flow
        steering but were discarded due to a mismatch in flow table.
- generated_pkt_steering_fail
        number of packets generated by the VNIC experiencing unexpected steering
        failure (at any point in steering flow).
- handled_pkt_steering_fail
        number of packets handled by the VNIC experiencing unexpected steering
        failure (at any point in steering flow owned by the VNIC, including the
        FDB for the eswitch owner).

User commands examples:

- Diagnose PF/VF vnic counters::

    $ devlink health diagnose pci/0000:82:00.1 reporter vnic

- Diagnose representor vnic counters (performed by supplying the devlink port
  of the representor, which can be obtained via the devlink port command)::

    $ devlink health diagnose pci/0000:82:00.1/65537 reporter vnic

.. note::
   This command can run over all interfaces, such as PF/VF and representor
   ports.
@@ -13,7 +13,6 @@ Contents:
    :maxdepth: 2

    kconfig
-   devlink
    switchdev
    tracepoints
    counters
@@ -18,6 +18,11 @@ Parameters
    * - ``enable_roce``
      - driverinit
      - Type: Boolean
+
+       If the device supports RoCE disablement, RoCE enablement state controls
+       device support for RoCE capability. Otherwise, the control occurs in the
+       driver stack. When RoCE is disabled at the driver level, only raw
+       ethernet QPs are supported.
    * - ``io_eq_size``
      - driverinit
      - The range is between 64 and 4096.
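Since enable_roce is a driverinit parameter, a change only takes effect after a devlink reload; a short sketch using the commands documented in this file (PCI address illustrative)::

    $ devlink dev param set pci/0000:06:00.0 name enable_roce value false cmode driverinit
    $ devlink dev reload pci/0000:06:00.0
    $ devlink dev param show pci/0000:06:00.0 name enable_roce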
@@ -48,6 +53,9 @@ parameters.
        * ``smfs`` Software managed flow steering. In SMFS mode, the HW
          steering entities are created and managed through the driver without
          firmware intervention.
+
+        SMFS mode is faster and provides better rule insertion rate compared to
+        the default DMFS mode.
    * - ``fdb_large_groups``
      - u32
      - driverinit
@@ -71,7 +79,24 @@ parameters.
        deprecated.

        Default: disabled
+   * - ``esw_port_metadata``
+     - Boolean
+     - runtime
+     - When applicable, disabling eswitch metadata can increase packet rate up
+       to 20% depending on the use case and packet sizes.
+
+       Eswitch port metadata state controls whether to internally tag packets
+       with metadata. Metadata tagging must be enabled for multi-port RoCE,
+       failover between representors and stacked devices. By default metadata is
+       enabled on the supported devices in E-switch. Metadata is applicable only
+       for E-switch in switchdev mode and users may disable it when NONE of the
+       below use cases will be in use:
+
+       1. HCA is in Dual/multi-port RoCE mode.
+       2. VF/SF representor bonding (usually used for live migration)
+       3. Stacked devices
+
+       When metadata is disabled, the above use cases will fail to initialize if
+       users try to enable them.
    * - ``hairpin_num_queues``
      - u32
      - driverinit
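Because esw_port_metadata is a runtime parameter, it can be set before moving the E-switch to switchdev mode; a sketch (PCI address illustrative)::

    $ devlink dev param set pci/0000:06:00.0 name esw_port_metadata value false cmode runtime
    $ devlink dev eswitch set pci/0000:06:00.0 mode switchdev
    $ devlink dev param show pci/0000:06:00.0 name esw_port_metadata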
@@ -104,3 +129,160 @@ The ``mlx5`` driver reports the following versions
    * - ``fw.version``
      - stored, running
      - Three digit major.minor.subminor firmware version number.

Health reporters
================

tx reporter
-----------
The tx reporter is responsible for reporting and recovering of the following three error scenarios:

- tx timeout
    Report on kernel tx timeout detection.
    Recover by searching lost interrupts.
- tx error completion
    Report on error tx completion.
    Recover by flushing the tx queue and resetting it.
- tx PTP port timestamping CQ unhealthy
    Report too many CQEs never delivered on port ts CQ.
    Recover by flushing and re-creating all PTP channels.

The tx reporter also supports an on-demand diagnose callback, through which it
provides real-time information about the status of its send queues.

User commands examples:

- Diagnose send queues status::

    $ devlink health diagnose pci/0000:82:00.0 reporter tx

.. note::
   This command has valid output only when the interface is up; otherwise the
   command has empty output.

- Show the number of tx errors indicated, the number of recover flows that
  ended successfully, whether auto-recover is enabled, and the grace period
  since the last recovery::

    $ devlink health show pci/0000:82:00.0 reporter tx

rx reporter
-----------
The rx reporter is responsible for reporting and recovering of the following two error scenarios:

- rx queues' initialization (population) timeout
    Population of rx queues' descriptors on ring initialization is done
    in napi context via triggering an irq. In case of a failure to get
    the minimum amount of descriptors, a timeout would occur, and
    descriptors could be recovered by polling the EQ (Event Queue).
- rx completions with errors (reported by HW on interrupt context)
    Report on rx completion error.
    Recover (if needed) by flushing the related queue and resetting it.

The rx reporter also supports an on-demand diagnose callback, through which it
provides real-time information about the status of its receive queues.

- Diagnose rx queues' status and the corresponding completion queue::

    $ devlink health diagnose pci/0000:82:00.0 reporter rx

.. note::
   This command has valid output only when the interface is up; otherwise the
   command has empty output.

- Show the number of rx errors indicated, the number of recover flows that
  ended successfully, whether auto-recover is enabled, and the grace period
  since the last recovery::

    $ devlink health show pci/0000:82:00.0 reporter rx

fw reporter
-----------
The fw reporter implements `diagnose` and `dump` callbacks.
It follows symptoms of fw error such as fw syndrome by triggering
a fw core dump and storing it into the dump buffer.
The fw reporter diagnose command can be triggered any time by the user to check
current fw status.

User commands examples:

- Check fw health status::

    $ devlink health diagnose pci/0000:82:00.0 reporter fw

- Read the FW core dump if already stored, or trigger a new one::

    $ devlink health dump show pci/0000:82:00.0 reporter fw

.. note::
   This command can run only on the PF which has fw tracer ownership;
   running it on another PF or any VF will return "Operation not permitted".

fw fatal reporter
-----------------
The fw fatal reporter implements `dump` and `recover` callbacks.
It follows fatal error indications by CR-space dump and a recover flow.
The CR-space dump uses the vsc interface, which is valid even if the FW command
interface is not functional, which is the case in most FW fatal errors.
The recover function runs a recover flow which reloads the driver and triggers a
fw reset if needed.
On firmware error, the health buffer is dumped into the dmesg. The log
level is derived from the error's severity (given in the health buffer).

User commands examples:

- Run the fw recover flow manually::

    $ devlink health recover pci/0000:82:00.0 reporter fw_fatal

- Read the FW CR-space dump if already stored, or trigger a new one::

    $ devlink health dump show pci/0000:82:00.1 reporter fw_fatal

.. note::
   This command can run only on the PF.

vnic reporter
-------------
The vnic reporter implements only the `diagnose` callback.
It is responsible for querying the vnic diagnostic counters from fw and
displaying them in realtime.

Description of the vnic counters:

- total_q_under_processor_handle
        number of queues in an error state due to
        an async error or errored command.
- send_queue_priority_update_flow
        number of QP/SQ priority/SL update events.
- cq_overrun
        number of times CQ entered an error state due to an overflow.
- async_eq_overrun
        number of times an EQ mapped to async events was overrun.
- comp_eq_overrun
        number of times an EQ mapped to completion events was overrun.
- quota_exceeded_command
        number of commands issued and failed due to quota exceeded.
- invalid_command
        number of commands issued and failed due to any reason other than
        quota exceeded.
- nic_receive_steering_discard
        number of packets that completed RX flow
        steering but were discarded due to a mismatch in flow table.
- generated_pkt_steering_fail
        number of packets generated by the VNIC experiencing unexpected steering
        failure (at any point in steering flow).
- handled_pkt_steering_fail
        number of packets handled by the VNIC experiencing unexpected steering
        failure (at any point in steering flow owned by the VNIC, including the
        FDB for the eswitch owner).

User commands examples:

- Diagnose PF/VF vnic counters::

    $ devlink health diagnose pci/0000:82:00.1 reporter vnic

- Diagnose representor vnic counters (performed by supplying the devlink port
  of the representor, which can be obtained via the devlink port command)::

    $ devlink health diagnose pci/0000:82:00.1/65537 reporter vnic

.. note::
   This command can run over all interfaces, such as PF/VF and representor
   ports.
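Beyond the diagnose and dump callbacks above, the generic devlink health interface also lets the reporter policy be tuned; a hedged sketch (the grace-period value and PCI address are illustrative, and auto-recover support depends on the reporter)::

    $ devlink health set pci/0000:82:00.0 reporter tx grace_period 500
    $ devlink health set pci/0000:82:00.0 reporter tx auto_recover true
    $ devlink health show pci/0000:82:00.0 reporter tx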
Makefile

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 6
 PATCHLEVEL = 5
-SUBLEVEL = 12
+SUBLEVEL = 13
 EXTRAVERSION =
 NAME = Hurr durr I'ma ninja sloth
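After this bump the tree identifies itself as 6.5.13, which can be verified from the top of the source tree::

    make kernelversion    # prints 6.5.13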
@@ -10,10 +10,6 @@

 #include <linux/interrupt.h>

-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 #define __exception_irq_entry	__irq_entry
-#else
-#define __exception_irq_entry
-#endif

 #endif /* __ASM_ARM_EXCEPTION_H */
@@ -1369,6 +1369,8 @@ choice
 config CPU_BIG_ENDIAN
 	bool "Build big-endian kernel"
 	depends on !LD_IS_LLD || LLD_VERSION >= 130000
+	# https://github.com/llvm/llvm-project/commit/1379b150991f70a5782e9a143c2ba5308da1161c
+	depends on AS_IS_GNU || AS_VERSION >= 150000
 	help
 	  Say Y if you plan on running a kernel with a big-endian userspace.
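The new assembler dependency can be sanity-checked against the local toolchain before attempting a big-endian build (the cross-compiler prefix is illustrative; GNU as is always accepted, while LLVM's integrated assembler needs version 15 or newer)::

    aarch64-linux-gnu-as --version | head -n1
    make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- menuconfig   # enable CPU_BIG_ENDIAN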
@@ -1186,8 +1186,14 @@
 		dma-coherent;
 	};

+	bus: bus {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		compatible = "simple-bus";
+		ranges;
+		dma-ranges = <0x0 0x0 0x0 0x0 0x100 0x00000000>;
+
 	usb0: usb@3100000 {
-		status = "disabled";
 		compatible = "snps,dwc3";
 		reg = <0x0 0x3100000 0x0 0x10000>;
 		interrupts = <0 80 0x4>; /* Level high type */

@@ -1195,10 +1201,10 @@
 		snps,quirk-frame-length-adjustment = <0x20>;
 		snps,dis_rxdet_inp3_quirk;
 		snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
+		status = "disabled";
 	};

 	usb1: usb@3110000 {
-		status = "disabled";
 		compatible = "snps,dwc3";
 		reg = <0x0 0x3110000 0x0 0x10000>;
 		interrupts = <0 81 0x4>; /* Level high type */

@@ -1206,6 +1212,8 @@
 		snps,quirk-frame-length-adjustment = <0x20>;
 		snps,dis_rxdet_inp3_quirk;
 		snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
+		status = "disabled";
 	};
+	};

 	ccn@4000000 {
@@ -135,7 +135,7 @@
 			reg = <0x0 0x4a800000 0x0 0x100000>;
 			no-map;

-			hwlocks = <&tcsr_mutex 0>;
+			hwlocks = <&tcsr_mutex 3>;
 		};
 	};

@@ -207,7 +207,7 @@
 	smem {
 		compatible = "qcom,smem";
 		memory-region = <&smem_region>;
-		hwlocks = <&tcsr_mutex 0>;
+		hwlocks = <&tcsr_mutex 3>;
 	};

 	soc: soc@0 {

@@ -389,7 +389,7 @@

 		tcsr_mutex: hwlock@1905000 {
 			compatible = "qcom,ipq6018-tcsr-mutex", "qcom,tcsr-mutex";
-			reg = <0x0 0x01905000 0x0 0x1000>;
+			reg = <0x0 0x01905000 0x0 0x20000>;
 			#hwlock-cells = <1>;
 		};

@@ -101,7 +101,7 @@
 			reg = <0x0 0x4ab00000 0x0 0x100000>;
 			no-map;

-			hwlocks = <&tcsr_mutex 0>;
+			hwlocks = <&tcsr_mutex 3>;
 		};

 		memory@4ac00000 {

@@ -174,7 +174,7 @@
 		smem@4aa00000 {
 			compatible = "qcom,smem";
 			reg = <0x0 0x4aa00000 0x0 0x100000>;
-			hwlocks = <&tcsr_mutex 0>;
+			hwlocks = <&tcsr_mutex 3>;
 			no-map;
 		};
 	};
@@ -167,9 +167,6 @@ static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num,
 		switch (ELF64_R_TYPE(rela[i].r_info)) {
 		case R_AARCH64_JUMP26:
 		case R_AARCH64_CALL26:
-			if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
-				break;
-
 			/*
 			 * We only have to consider branch targets that resolve
 			 * to symbols that are defined in a different section.

@@ -269,9 +266,6 @@ static int partition_branch_plt_relas(Elf64_Sym *syms, Elf64_Rela *rela,
 {
 	int i = 0, j = numrels - 1;

-	if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
-		return 0;
-
 	while (i < j) {
 		if (branch_rela_needs_plt(syms, &rela[i], dstidx))
 			i++;
@@ -32,7 +32,7 @@ static inline void set_my_cpu_offset(unsigned long off)
 #define __my_cpu_offset __my_cpu_offset

 #define PERCPU_OP(op, asm_op, c_op)					\
-static inline unsigned long __percpu_##op(void *ptr,			\
+static __always_inline unsigned long __percpu_##op(void *ptr,		\
 			unsigned long val, int size)			\
 {									\
 	unsigned long ret;						\

@@ -63,7 +63,7 @@ PERCPU_OP(and, and, &)
 PERCPU_OP(or, or, |)
 #undef PERCPU_OP

-static inline unsigned long __percpu_read(void *ptr, int size)
+static __always_inline unsigned long __percpu_read(void *ptr, int size)
 {
 	unsigned long ret;

@@ -100,7 +100,7 @@ static inline unsigned long __percpu_read(void *ptr, int size)
 	return ret;
 }

-static inline void __percpu_write(void *ptr, unsigned long val, int size)
+static __always_inline void __percpu_write(void *ptr, unsigned long val, int size)
 {
 	switch (size) {
 	case 1:

@@ -132,7 +132,7 @@ static inline void __percpu_write(void *ptr, unsigned long val, int size)
 	}
 }

-static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
+static __always_inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
 						int size)
 {
 	switch (size) {
@@ -472,6 +472,7 @@ struct pdc_model {		/* for PDC_MODEL */
 	unsigned long arch_rev;
 	unsigned long pot_key;
 	unsigned long curr_key;
+	unsigned long width;	/* default of PSW_W bit (1=enabled) */
 };

 struct pdc_cache_cf {		/* for PDC_CACHE (I/D-caches) */
@@ -36,6 +36,24 @@
 	.level 2.0
 #endif

+/*
+ * We need seven instructions after a TLB insert for it to take effect.
+ * The PA8800/PA8900 processors are an exception and need 12 instructions.
+ * The RFI changes both IAOQ_Back and IAOQ_Front, so it counts as one.
+ */
+#ifdef CONFIG_64BIT
+#define NUM_PIPELINE_INSNS    12
+#else
+#define NUM_PIPELINE_INSNS    7
+#endif
+
+	/* Insert num nops */
+	.macro	insert_nops num
+	.rept \num
+	nop
+	.endr
+	.endm
+
 	/* Get aligned page_table_lock address for this mm from cr28/tr4 */
 	.macro	get_ptl reg
 	mfctl	%cr28,\reg
@@ -415,24 +433,20 @@
3:
	.endm

-	/* Release page_table_lock without reloading lock address.
-	   We use an ordered store to ensure all prior accesses are
-	   performed prior to releasing the lock. */
-	.macro	ptl_unlock0	spc,tmp,tmp2
+	/* Release page_table_lock if for user space. We use an ordered
+	   store to ensure all prior accesses are performed prior to
+	   releasing the lock. Note stw may not be executed, so we
+	   provide one extra nop when CONFIG_TLB_PTLOCK is defined. */
+	.macro	ptl_unlock	spc,tmp,tmp2
 #ifdef CONFIG_TLB_PTLOCK
-98:	ldi	__ARCH_SPIN_LOCK_UNLOCKED_VAL, \tmp2
+98:	get_ptl	\tmp
+	ldi	__ARCH_SPIN_LOCK_UNLOCKED_VAL, \tmp2
 	or,COND(=)	%r0,\spc,%r0
 	stw,ma	\tmp2,0(\tmp)
 99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
-#endif
-	.endm
-
-	/* Release page_table_lock. */
-	.macro	ptl_unlock1	spc,tmp,tmp2
-#ifdef CONFIG_TLB_PTLOCK
-98:	get_ptl	\tmp
-	ptl_unlock0	\spc,\tmp,\tmp2
-99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
+	insert_nops	NUM_PIPELINE_INSNS - 4
+#else
+	insert_nops	NUM_PIPELINE_INSNS - 1
 #endif
 	.endm
@@ -461,13 +475,13 @@
	 * to a CPU TLB 4k PFN (4k => 12 bits to shift) */
	#define PAGE_ADD_SHIFT		(PAGE_SHIFT-12)
	#define PAGE_ADD_HUGE_SHIFT	(REAL_HPAGE_SHIFT-12)
+	#define PFN_START_BIT	(63-ASM_PFN_PTE_SHIFT+(63-58)-PAGE_ADD_SHIFT)

	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	.macro	convert_for_tlb_insert20 pte,tmp
 #ifdef CONFIG_HUGETLB_PAGE
	copy	\pte,\tmp
-	extrd,u	\tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
-		64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
+	extrd,u	\tmp,PFN_START_BIT,PFN_START_BIT+1,\pte

	depdi	_PAGE_SIZE_ENCODING_DEFAULT,63,\
		(63-58)+PAGE_ADD_SHIFT,\pte

@@ -475,8 +489,7 @@
	depdi	_HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
		(63-58)+PAGE_ADD_HUGE_SHIFT,\pte
 #else /* Huge pages disabled */
-	extrd,u	\pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
-		64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
+	extrd,u	\pte,PFN_START_BIT,PFN_START_BIT+1,\pte
	depdi	_PAGE_SIZE_ENCODING_DEFAULT,63,\
		(63-58)+PAGE_ADD_SHIFT,\pte
 #endif
@@ -1124,7 +1137,7 @@ dtlb_miss_20w:

	idtlbt	pte,prot

-	ptl_unlock1	spc,t0,t1
+	ptl_unlock	spc,t0,t1
	rfir
	nop

@@ -1133,6 +1146,7 @@ dtlb_check_alias_20w:

	idtlbt	pte,prot

+	insert_nops	NUM_PIPELINE_INSNS - 1
	rfir
	nop

@@ -1150,7 +1164,7 @@ nadtlb_miss_20w:

	idtlbt	pte,prot

-	ptl_unlock1	spc,t0,t1
+	ptl_unlock	spc,t0,t1
	rfir
	nop

@@ -1159,6 +1173,7 @@ nadtlb_check_alias_20w:

	idtlbt	pte,prot

+	insert_nops	NUM_PIPELINE_INSNS - 1
	rfir
	nop

@@ -1184,7 +1199,7 @@ dtlb_miss_11:

	mtsp	t1, %sr1	/* Restore sr1 */

-	ptl_unlock1	spc,t0,t1
+	ptl_unlock	spc,t0,t1
	rfir
	nop

@@ -1194,6 +1209,7 @@ dtlb_check_alias_11:
	idtlba	pte,(va)
	idtlbp	prot,(va)

+	insert_nops	NUM_PIPELINE_INSNS - 1
	rfir
	nop

@@ -1217,7 +1233,7 @@ nadtlb_miss_11:

	mtsp	t1, %sr1	/* Restore sr1 */

-	ptl_unlock1	spc,t0,t1
+	ptl_unlock	spc,t0,t1
	rfir
	nop

@@ -1227,6 +1243,7 @@ nadtlb_check_alias_11:
	idtlba	pte,(va)
	idtlbp	prot,(va)

+	insert_nops	NUM_PIPELINE_INSNS - 1
	rfir
	nop

@@ -1246,7 +1263,7 @@ dtlb_miss_20:

	idtlbt	pte,prot

-	ptl_unlock1	spc,t0,t1
+	ptl_unlock	spc,t0,t1
	rfir
	nop

@@ -1255,6 +1272,7 @@ dtlb_check_alias_20:

	idtlbt	pte,prot

+	insert_nops	NUM_PIPELINE_INSNS - 1
	rfir
	nop

@@ -1274,7 +1292,7 @@ nadtlb_miss_20:

	idtlbt	pte,prot

-	ptl_unlock1	spc,t0,t1
+	ptl_unlock	spc,t0,t1
	rfir
	nop

@@ -1283,6 +1301,7 @@ nadtlb_check_alias_20:

	idtlbt	pte,prot

+	insert_nops	NUM_PIPELINE_INSNS - 1
	rfir
	nop

@@ -1319,7 +1338,7 @@ itlb_miss_20w:

	iitlbt	pte,prot

-	ptl_unlock1	spc,t0,t1
+	ptl_unlock	spc,t0,t1
	rfir
	nop

@@ -1343,7 +1362,7 @@ naitlb_miss_20w:

	iitlbt	pte,prot

-	ptl_unlock1	spc,t0,t1
+	ptl_unlock	spc,t0,t1
	rfir
	nop

@@ -1352,6 +1371,7 @@ naitlb_check_alias_20w:

	iitlbt	pte,prot

+	insert_nops	NUM_PIPELINE_INSNS - 1
	rfir
	nop

@@ -1377,7 +1397,7 @@ itlb_miss_11:

	mtsp	t1, %sr1	/* Restore sr1 */

-	ptl_unlock1	spc,t0,t1
+	ptl_unlock	spc,t0,t1
	rfir
	nop

@@ -1401,7 +1421,7 @@ naitlb_miss_11:

	mtsp	t1, %sr1	/* Restore sr1 */

-	ptl_unlock1	spc,t0,t1
+	ptl_unlock	spc,t0,t1
	rfir
	nop

@@ -1411,6 +1431,7 @@ naitlb_check_alias_11:
	iitlba	pte,(%sr0, va)
	iitlbp	prot,(%sr0, va)

+	insert_nops	NUM_PIPELINE_INSNS - 1
	rfir
	nop

@@ -1431,7 +1452,7 @@ itlb_miss_20:

	iitlbt	pte,prot

-	ptl_unlock1	spc,t0,t1
+	ptl_unlock	spc,t0,t1
	rfir
	nop

@@ -1451,7 +1472,7 @@ naitlb_miss_20:

	iitlbt	pte,prot

-	ptl_unlock1	spc,t0,t1
+	ptl_unlock	spc,t0,t1
	rfir
	nop

@@ -1460,6 +1481,7 @@ naitlb_check_alias_20:

	iitlbt	pte,prot

+	insert_nops	NUM_PIPELINE_INSNS - 1
	rfir
	nop

@@ -1481,7 +1503,7 @@ dbit_trap_20w:

	idtlbt	pte,prot

-	ptl_unlock0	spc,t0,t1
+	ptl_unlock	spc,t0,t1
	rfir
	nop
 #else

@@ -1507,7 +1529,7 @@ dbit_trap_11:

	mtsp	t1, %sr1	/* Restore sr1 */

-	ptl_unlock0	spc,t0,t1
+	ptl_unlock	spc,t0,t1
	rfir
	nop

@@ -1527,7 +1549,7 @@ dbit_trap_20:

	idtlbt	pte,prot

-	ptl_unlock0	spc,t0,t1
+	ptl_unlock	spc,t0,t1
	rfir
	nop
 #endif
@@ -70,9 +70,8 @@ $bss_loop:
	stw,ma	%arg2,4(%r1)
	stw,ma	%arg3,4(%r1)

-#if !defined(CONFIG_64BIT) && defined(CONFIG_PA20)
-	/* This 32-bit kernel was compiled for PA2.0 CPUs. Check current CPU
-	 * and halt kernel if we detect a PA1.x CPU. */
+#if defined(CONFIG_PA20)
+	/* check for 64-bit capable CPU as required by current kernel */
	ldi	32,%r10
	mtctl	%r10,%cr11
	.level 2.0
@@ -1371,6 +1371,5 @@ static void power_pmu_disable(struct pmu *pmu)
		/*
		 * Disable instruction sampling if it was enabled
		 */
-		if (cpuhw->mmcr.mmcra & MMCRA_SAMPLE_ENABLE)
		val &= ~MMCRA_SAMPLE_ENABLE;

		/* Disable BHRB via mmcra (BHRBRD) for p10 */

@@ -1383,7 +1382,7 @@ static void power_pmu_disable(struct pmu *pmu)
		 * instruction sampling or BHRB.
		 */
		if (val != mmcra) {
-			mtspr(SPRN_MMCRA, mmcra);
+			mtspr(SPRN_MMCRA, val);
			mb();
			isync();
		}
@@ -24,13 +24,20 @@
 #include <linux/uaccess.h>


+struct opal_prd_msg {
+	union {
+		struct opal_prd_msg_header header;
+		DECLARE_FLEX_ARRAY(u8, data);
+	};
+};
+
 /*
  * The msg member must be at the end of the struct, as it's followed by the
  * message data.
  */
 struct opal_prd_msg_queue_item {
	struct list_head		list;
-	struct opal_prd_msg_header	msg;
+	struct opal_prd_msg		msg;
 };

 static struct device_node *prd_node;

@@ -156,7 +163,7 @@ static ssize_t opal_prd_read(struct file *file, char __user *buf,
	int rc;

	/* we need at least a header's worth of data */
-	if (count < sizeof(item->msg))
+	if (count < sizeof(item->msg.header))
		return -EINVAL;

	if (*ppos)

@@ -186,7 +193,7 @@ static ssize_t opal_prd_read(struct file *file, char __user *buf,
		return -EINTR;
	}

-	size = be16_to_cpu(item->msg.size);
+	size = be16_to_cpu(item->msg.header.size);
	if (size > count) {
		err = -EINVAL;
		goto err_requeue;

@@ -352,7 +359,7 @@ static int opal_prd_msg_notifier(struct notifier_block *nb,
	if (!item)
		return -ENOMEM;

-	memcpy(&item->msg, msg->params, msg_size);
+	memcpy(&item->msg.data, msg->params, msg_size);

	spin_lock_irqsave(&opal_prd_msg_queue_lock, flags);
	list_add_tail(&item->list, &opal_prd_msg_queue);
@@ -25,7 +25,6 @@ DECLARE_DO_ERROR_INFO(do_trap_ecall_s);
 DECLARE_DO_ERROR_INFO(do_trap_ecall_m);
 DECLARE_DO_ERROR_INFO(do_trap_break);

-asmlinkage unsigned long get_overflow_stack(void);
 asmlinkage void handle_bad_stack(struct pt_regs *regs);
 asmlinkage void do_page_fault(struct pt_regs *regs);
 asmlinkage void do_irq(struct pt_regs *regs);
@@ -82,6 +82,28 @@
 .endr
 .endm

+#ifdef CONFIG_SMP
+#ifdef CONFIG_32BIT
+#define PER_CPU_OFFSET_SHIFT 2
+#else
+#define PER_CPU_OFFSET_SHIFT 3
+#endif
+
+.macro asm_per_cpu dst sym tmp
+	REG_L \tmp, TASK_TI_CPU_NUM(tp)
+	slli  \tmp, \tmp, PER_CPU_OFFSET_SHIFT
+	la    \dst, __per_cpu_offset
+	add   \dst, \dst, \tmp
+	REG_L \tmp, 0(\dst)
+	la    \dst, \sym
+	add   \dst, \dst, \tmp
+.endm
+#else /* CONFIG_SMP */
+.macro asm_per_cpu dst sym tmp
+	la    \dst, \sym
+.endm
+#endif /* CONFIG_SMP */
+
 /* save all GPs except x1 ~ x5 */
 .macro save_from_x6_to_x31
	REG_S x6,  PT_T1(sp)
@@ -10,4 +10,9 @@

 #define RISCV_HWPROBE_MAX_KEY 5

+static inline bool riscv_hwprobe_key_is_valid(__s64 key)
+{
+	return key >= 0 && key <= RISCV_HWPROBE_MAX_KEY;
+}
+
 #endif
@@ -33,8 +33,8 @@
 #define PAGE_OFFSET		_AC(CONFIG_PAGE_OFFSET, UL)
 #endif
 /*
- * By default, CONFIG_PAGE_OFFSET value corresponds to SV48 address space so
- * define the PAGE_OFFSET value for SV39.
+ * By default, CONFIG_PAGE_OFFSET value corresponds to SV57 address space so
+ * define the PAGE_OFFSET value for SV48 and SV39.
  */
 #define PAGE_OFFSET_L4		_AC(0xffffaf8000000000, UL)
 #define PAGE_OFFSET_L3		_AC(0xffffffd800000000, UL)
@@ -34,9 +34,6 @@

 #ifndef __ASSEMBLY__

-extern long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE / sizeof(long)];
-extern unsigned long spin_shadow_stack;
-
 #include <asm/processor.h>
 #include <asm/csr.h>
@@ -14,7 +14,7 @@ static inline void cpu_relax(void)
	__asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy));
 #endif

-#ifdef __riscv_zihintpause
+#ifdef CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE
	/*
	 * Reduce instruction retirement.
	 * This assumes the PC changes.
@@ -39,6 +39,7 @@ void asm_offsets(void)
	OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp);
	OFFSET(TASK_TI_USER_SP, task_struct, thread_info.user_sp);

+	OFFSET(TASK_TI_CPU_NUM, task_struct, thread_info.cpu);
	OFFSET(TASK_THREAD_F0,  task_struct, thread.fstate.f[0]);
	OFFSET(TASK_THREAD_F1,  task_struct, thread.fstate.f[1]);
	OFFSET(TASK_THREAD_F2,  task_struct, thread.fstate.f[2]);
@@ -10,9 +10,13 @@
 #include <asm/asm.h>
 #include <asm/csr.h>
 #include <asm/unistd.h>
+#include <asm/page.h>
 #include <asm/thread_info.h>
 #include <asm/asm-offsets.h>
 #include <asm/errata_list.h>
+#include <linux/sizes.h>
+
+.section .irqentry.text, "ax"

 SYM_CODE_START(handle_exception)
	/*

@@ -170,67 +174,15 @@ SYM_CODE_END(ret_from_exception)

 #ifdef CONFIG_VMAP_STACK
 SYM_CODE_START_LOCAL(handle_kernel_stack_overflow)
-	/*
-	 * Takes the pseudo-spinlock for the shadow stack, in case multiple
-	 * harts are concurrently overflowing their kernel stacks. We could
-	 * store any value here, but since we're overflowing the kernel stack
-	 * already we only have SP to use as a scratch register. So we just
-	 * swap in the address of the spinlock, as that's definitely non-zero.
-	 *
-	 * Pairs with a store_release in handle_bad_stack().
-	 */
-1:	la sp, spin_shadow_stack
-	REG_AMOSWAP_AQ sp, sp, (sp)
-	bnez sp, 1b
+	/* we reach here from kernel context, sscratch must be 0 */
+	csrrw x31, CSR_SCRATCH, x31
+	asm_per_cpu sp, overflow_stack, x31
+	li x31, OVERFLOW_STACK_SIZE
+	add sp, sp, x31
+	/* zero out x31 again and restore x31 */
+	xor x31, x31, x31
+	csrrw x31, CSR_SCRATCH, x31

-	la sp, shadow_stack
-	addi sp, sp, SHADOW_OVERFLOW_STACK_SIZE
-
-	//save caller register to shadow stack
-	addi sp, sp, -(PT_SIZE_ON_STACK)
-	REG_S x1,  PT_RA(sp)
-	REG_S x5,  PT_T0(sp)
-	REG_S x6,  PT_T1(sp)
-	REG_S x7,  PT_T2(sp)
-	REG_S x10, PT_A0(sp)
-	REG_S x11, PT_A1(sp)
-	REG_S x12, PT_A2(sp)
-	REG_S x13, PT_A3(sp)
-	REG_S x14, PT_A4(sp)
-	REG_S x15, PT_A5(sp)
-	REG_S x16, PT_A6(sp)
-	REG_S x17, PT_A7(sp)
-	REG_S x28, PT_T3(sp)
-	REG_S x29, PT_T4(sp)
-	REG_S x30, PT_T5(sp)
-	REG_S x31, PT_T6(sp)
-
-	la ra, restore_caller_reg
-	tail get_overflow_stack
-
-restore_caller_reg:
-	//save per-cpu overflow stack
-	REG_S a0, -8(sp)
-	//restore caller register from shadow_stack
-	REG_L x1,  PT_RA(sp)
-	REG_L x5,  PT_T0(sp)
-	REG_L x6,  PT_T1(sp)
-	REG_L x7,  PT_T2(sp)
-	REG_L x10, PT_A0(sp)
-	REG_L x11, PT_A1(sp)
-	REG_L x12, PT_A2(sp)
-	REG_L x13, PT_A3(sp)
-	REG_L x14, PT_A4(sp)
-	REG_L x15, PT_A5(sp)
-	REG_L x16, PT_A6(sp)
-	REG_L x17, PT_A7(sp)
-	REG_L x28, PT_T3(sp)
-	REG_L x29, PT_T4(sp)
-	REG_L x30, PT_T5(sp)
-	REG_L x31, PT_T6(sp)
-
-	//load per-cpu overflow stack
-	REG_L sp, -8(sp)
 	addi sp, sp, -(PT_SIZE_ON_STACK)

	//save context to overflow stack
@@ -24,7 +24,7 @@ static inline bool rv_insn_reg_set_val(struct pt_regs *regs, u32 index,
				       unsigned long val)
 {
	if (index == 0)
-		return false;
+		return true;
	else if (index <= 31)
		*((unsigned long *)regs + index) = val;
	else
@@ -3,6 +3,7 @@
 #include <linux/highmem.h>
 #include <linux/ptrace.h>
 #include <linux/uprobes.h>
+#include <asm/insn.h>

 #include "decode-insn.h"

@@ -17,6 +18,11 @@ bool is_swbp_insn(uprobe_opcode_t *insn)
 #endif
 }

+bool is_trap_insn(uprobe_opcode_t *insn)
+{
+	return riscv_insn_is_ebreak(*insn) || riscv_insn_is_c_ebreak(*insn);
+}
+
 unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
 {
	return instruction_pointer(regs);
@@ -408,48 +408,14 @@ int is_valid_bugaddr(unsigned long pc)
 #endif /* CONFIG_GENERIC_BUG */

 #ifdef CONFIG_VMAP_STACK
-/*
- * Extra stack space that allows us to provide panic messages when the kernel
- * has overflowed its stack.
- */
-static DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)],
+DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)],
		overflow_stack)__aligned(16);
-/*
- * A temporary stack for use by handle_kernel_stack_overflow. This is used so
- * we can call into C code to get the per-hart overflow stack. Usage of this
- * stack must be protected by spin_shadow_stack.
- */
-long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE/sizeof(long)] __aligned(16);
-
-/*
- * A pseudo spinlock to protect the shadow stack from being used by multiple
- * harts concurrently. This isn't a real spinlock because the lock side must
- * be taken without a valid stack and only a single register, it's only taken
- * while in the process of panicking anyway so the performance and error
- * checking a proper spinlock gives us doesn't matter.
- */
-unsigned long spin_shadow_stack;
-
-asmlinkage unsigned long get_overflow_stack(void)
-{
-	return (unsigned long)this_cpu_ptr(overflow_stack) +
-		OVERFLOW_STACK_SIZE;
-}
-
 asmlinkage void handle_bad_stack(struct pt_regs *regs)
 {
	unsigned long tsk_stk = (unsigned long)current->stack;
	unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);

-	/*
-	 * We're done with the shadow stack by this point, as we're on the
-	 * overflow stack. Tell any other concurrent overflowing harts that
-	 * they can proceed with panicking by releasing the pseudo-spinlock.
-	 *
-	 * This pairs with an amoswap.aq in handle_kernel_stack_overflow.
-	 */
-	smp_store_release(&spin_shadow_stack, 0);
-
	console_verbose();

	pr_emerg("Insufficient stack space to handle exception!\n");
@@ -37,7 +37,7 @@ int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,

	/* This is something we can handle, fill out the pairs. */
	while (p < end) {
-		if (p->key <= RISCV_HWPROBE_MAX_KEY) {
+		if (riscv_hwprobe_key_is_valid(p->key)) {
			p->value = avd->all_cpu_hwprobe_values[p->key];

		} else {
@@ -384,6 +384,9 @@ static int __init ptdump_init(void)

	kernel_ptd_info.base_addr = KERN_VIRT_START;

+	pg_level[1].name = pgtable_l5_enabled ? "P4D" : "PGD";
+	pg_level[2].name = pgtable_l4_enabled ? "PUD" : "PGD";
+
	for (i = 0; i < ARRAY_SIZE(pg_level); i++)
		for (j = 0; j < ARRAY_SIZE(pte_bits); j++)
			pg_level[i].mask |= pte_bits[j].mask;
@@ -121,7 +121,7 @@ static void mark_kernel_pud(p4d_t *p4d, unsigned long addr, unsigned long end)
			continue;
		if (!pud_folded(*pud)) {
			page = phys_to_page(pud_val(*pud));
-			for (i = 0; i < 3; i++)
+			for (i = 0; i < 4; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_pmd(pud, addr, next);

@@ -142,7 +142,7 @@ static void mark_kernel_p4d(pgd_t *pgd, unsigned long addr, unsigned long end)
			continue;
		if (!p4d_folded(*p4d)) {
			page = phys_to_page(p4d_val(*p4d));
-			for (i = 0; i < 3; i++)
+			for (i = 0; i < 4; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_pud(p4d, addr, next);

@@ -164,7 +164,7 @@ static void mark_kernel_pgd(void)
			continue;
		if (!pgd_folded(*pgd)) {
			page = phys_to_page(pgd_val(*pgd));
-			for (i = 0; i < 3; i++)
+			for (i = 0; i < 4; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_p4d(pgd, addr, next);
@@ -13,6 +13,7 @@
 #include <linux/hugetlb.h>
 #include <linux/slab.h>
 #include <linux/sort.h>
+#include <asm/page-states.h>
 #include <asm/cacheflush.h>
 #include <asm/nospec-branch.h>
 #include <asm/pgalloc.h>

@@ -46,8 +47,11 @@ void *vmem_crst_alloc(unsigned long val)
	unsigned long *table;

	table = vmem_alloc_pages(CRST_ALLOC_ORDER);
-	if (table)
-		crst_table_init(table, val);
+	if (!table)
+		return NULL;
+	crst_table_init(table, val);
+	if (slab_is_available())
+		arch_set_page_dat(virt_to_page(table), CRST_ALLOC_ORDER);
	return table;
 }
@@ -24,8 +24,17 @@
 #include <linux/types.h>
 #include <crypto/sha1.h>
 #include <crypto/sha1_base.h>
+#include <asm/cpu_device_id.h>
 #include <asm/simd.h>

+static const struct x86_cpu_id module_cpu_ids[] = {
+	X86_MATCH_FEATURE(X86_FEATURE_AVX2, NULL),
+	X86_MATCH_FEATURE(X86_FEATURE_AVX, NULL),
+	X86_MATCH_FEATURE(X86_FEATURE_SSSE3, NULL),
+	{}
+};
+MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids);
+
 static int sha1_update(struct shash_desc *desc, const u8 *data,
			unsigned int len, sha1_block_fn *sha1_xform)
 {

@@ -301,6 +310,9 @@ static inline void unregister_sha1_ni(void) { }

 static int __init sha1_ssse3_mod_init(void)
 {
+	if (!x86_match_cpu(module_cpu_ids))
+		return -ENODEV;
+
	if (register_sha1_ssse3())
		goto fail;
@@ -38,11 +38,20 @@
 #include <crypto/sha2.h>
 #include <crypto/sha256_base.h>
 #include <linux/string.h>
+#include <asm/cpu_device_id.h>
 #include <asm/simd.h>

 asmlinkage void sha256_transform_ssse3(struct sha256_state *state,
				       const u8 *data, int blocks);

+static const struct x86_cpu_id module_cpu_ids[] = {
+	X86_MATCH_FEATURE(X86_FEATURE_AVX2, NULL),
+	X86_MATCH_FEATURE(X86_FEATURE_AVX, NULL),
+	X86_MATCH_FEATURE(X86_FEATURE_SSSE3, NULL),
+	{}
+};
+MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids);
+
 static int _sha256_update(struct shash_desc *desc, const u8 *data,
			  unsigned int len, sha256_block_fn *sha256_xform)
 {

@@ -366,6 +375,9 @@ static inline void unregister_sha256_ni(void) { }

 static int __init sha256_ssse3_mod_init(void)
 {
+	if (!x86_match_cpu(module_cpu_ids))
+		return -ENODEV;
+
	if (register_sha256_ssse3())
		goto fail;
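Whether these modules will now load on a given machine can be predicted from the CPU flags that module_cpu_ids matches on; a quick check from userspace (module naming follows the kernel's sha256-ssse3 object)::

    grep -m1 '^flags' /proc/cpuinfo | tr ' ' '\n' | grep -E '^(ssse3|avx|avx2)$'
    modprobe sha256-ssse3 && echo loaded   # now fails with ENODEV on unmatched CPUs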
@@ -108,6 +108,7 @@ KVM_X86_OP_OPTIONAL(vcpu_blocking)
 KVM_X86_OP_OPTIONAL(vcpu_unblocking)
 KVM_X86_OP_OPTIONAL(pi_update_irte)
 KVM_X86_OP_OPTIONAL(pi_start_assignment)
+KVM_X86_OP_OPTIONAL(apicv_pre_state_restore)
 KVM_X86_OP_OPTIONAL(apicv_post_state_restore)
 KVM_X86_OP_OPTIONAL_RET0(dy_apicv_has_pending_interrupt)
 KVM_X86_OP_OPTIONAL(set_hv_timer)
@@ -1690,6 +1690,7 @@ struct kvm_x86_ops {
	int (*pi_update_irte)(struct kvm *kvm, unsigned int host_irq,
			      uint32_t guest_irq, bool set);
	void (*pi_start_assignment)(struct kvm *kvm);
+	void (*apicv_pre_state_restore)(struct kvm_vcpu *vcpu);
	void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
	bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);
@@ -553,6 +553,7 @@
 #define MSR_AMD64_CPUID_FN_1		0xc0011004
 #define MSR_AMD64_LS_CFG		0xc0011020
 #define MSR_AMD64_DC_CFG		0xc0011022
+#define MSR_AMD64_TW_CFG		0xc0011023

 #define MSR_AMD64_DE_CFG		0xc0011029
 #define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT	1
@@ -12,13 +12,6 @@

 #define NR_NODE_MEMBLKS		(MAX_NUMNODES*2)

-/*
- * Too small node sizes may confuse the VM badly. Usually they
- * result from BIOS bugs. So dont recognize nodes as standalone
- * NUMA entities that have less than this amount of RAM listed:
- */
-#define NODE_MIN_SIZE (4*1024*1024)
-
 extern int numa_off;

 /*
@@ -55,14 +55,14 @@ msi_set_affinity(struct irq_data *irqd, const struct cpumask *mask, bool force)
	 * caused by the non-atomic update of the address/data pair.
	 *
	 * Direct update is possible when:
-	 * - The MSI is maskable (remapped MSI does not use this code path)).
-	 *   The quirk bit is not set in this case.
+	 * - The MSI is maskable (remapped MSI does not use this code path).
+	 *   The reservation mode bit is set in this case.
	 * - The new vector is the same as the old vector
	 * - The old vector is MANAGED_IRQ_SHUTDOWN_VECTOR (interrupt starts up)
	 * - The interrupt is not yet started up
	 * - The new destination CPU is the same as the old destination CPU
	 */
-	if (!irqd_msi_nomask_quirk(irqd) ||
+	if (!irqd_can_reserve(irqd) ||
	    cfg->vector == old_cfg.vector ||
	    old_cfg.vector == MANAGED_IRQ_SHUTDOWN_VECTOR ||
	    !irqd_is_started(irqd) ||

@@ -215,8 +215,6 @@ static bool x86_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
		if (WARN_ON_ONCE(domain != real_parent))
			return false;
		info->chip->irq_set_affinity = msi_set_affinity;
-		/* See msi_set_affinity() for the gory details */
-		info->flags |= MSI_FLAG_NOMASK_QUIRK;
		break;
	case DOMAIN_BUS_DMAR:
	case DOMAIN_BUS_AMDVI:
@@ -86,7 +86,11 @@ static void hygon_get_topology(struct cpuinfo_x86 *c)
		if (!err)
			c->x86_coreid_bits = get_count_order(c->x86_max_cores);

		/* Socket ID is ApicId[6] for these processors. */
		/*
		 * Socket ID is ApicId[6] for the processors with model <= 0x3
		 * when running on host.
		 */
		if (!boot_cpu_has(X86_FEATURE_HYPERVISOR) && c->x86_model <= 0x3)
			c->phys_proc_id = c->apicid >> APICID_SOCKET_ID_BIT;

		cacheinfo_hygon_init_llc_id(c, cpu);

@@ -727,10 +727,12 @@ static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,

	stimer_cleanup(stimer);
	stimer->count = count;
	if (!host) {
		if (stimer->count == 0)
			stimer->config.enable = 0;
		else if (stimer->config.auto_enable)
			stimer->config.enable = 1;
	}

	if (stimer->config.enable)
		stimer_mark_pending(stimer, false);
@@ -2423,22 +2423,22 @@ EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);
void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u64 val;

	/*
	 * ICR is a single 64-bit register when x2APIC is enabled. For legacy
	 * xAPIC, ICR writes need to go down the common (slightly slower) path
	 * to get the upper half from ICR2.
	 * ICR is a single 64-bit register when x2APIC is enabled, all others
	 * registers hold 32-bit values. For legacy xAPIC, ICR writes need to
	 * go down the common path to get the upper half from ICR2.
	 *
	 * Note, using the write helpers may incur an unnecessary write to the
	 * virtual APIC state, but KVM needs to conditionally modify the value
	 * in certain cases, e.g. to clear the ICR busy bit. The cost of extra
	 * conditional branches is likely a wash relative to the cost of the
	 * maybe-unecessary write, and both are in the noise anyways.
	 */
	if (apic_x2apic_mode(apic) && offset == APIC_ICR) {
		val = kvm_lapic_get_reg64(apic, APIC_ICR);
		kvm_apic_send_ipi(apic, (u32)val, (u32)(val >> 32));
		trace_kvm_apic_write(APIC_ICR, val);
	} else {
		/* TODO: optimize to just emulate side effect w/o one more write */
		val = kvm_lapic_get_reg(apic, offset);
		kvm_lapic_reg_write(apic, offset, (u32)val);
	}
	if (apic_x2apic_mode(apic) && offset == APIC_ICR)
		kvm_x2apic_icr_write(apic, kvm_lapic_get_reg64(apic, APIC_ICR));
	else
		kvm_lapic_reg_write(apic, offset, kvm_lapic_get_reg(apic, offset));
}
EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode);

@@ -2649,6 +2649,8 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
	u64 msr_val;
	int i;

	static_call_cond(kvm_x86_apicv_pre_state_restore)(vcpu);

	if (!init_event) {
		msr_val = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE;
		if (kvm_vcpu_is_reset_bsp(vcpu))

@@ -2960,6 +2962,8 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
	struct kvm_lapic *apic = vcpu->arch.apic;
	int r;

	static_call_cond(kvm_x86_apicv_pre_state_restore)(vcpu);

	kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
	/* set SPIV separately to get count of SW disabled APICs right */
	apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));
@@ -6909,7 +6909,7 @@ static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
	vmcs_write64(EOI_EXIT_BITMAP3, eoi_exit_bitmap[3]);
}

static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu)
static void vmx_apicv_pre_state_restore(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

@@ -8275,7 +8275,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
	.set_apic_access_page_addr = vmx_set_apic_access_page_addr,
	.refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl,
	.load_eoi_exitmap = vmx_load_eoi_exitmap,
	.apicv_post_state_restore = vmx_apicv_post_state_restore,
	.apicv_pre_state_restore = vmx_apicv_pre_state_restore,
	.required_apicv_inhibits = VMX_REQUIRED_APICV_INHIBITS,
	.hwapic_irr_update = vmx_hwapic_irr_update,
	.hwapic_isr_update = vmx_hwapic_isr_update,

@@ -3643,6 +3643,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
	case MSR_AMD64_PATCH_LOADER:
	case MSR_AMD64_BU_CFG2:
	case MSR_AMD64_DC_CFG:
	case MSR_AMD64_TW_CFG:
	case MSR_F15H_EX_CFG:
		break;

@@ -4067,6 +4068,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
	case MSR_AMD64_BU_CFG2:
	case MSR_IA32_PERF_CTL:
	case MSR_AMD64_DC_CFG:
	case MSR_AMD64_TW_CFG:
	case MSR_F15H_EX_CFG:
		/*
		 * Intel Sandy Bridge CPUs must support the RAPL (running average power

@@ -602,13 +602,6 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
		if (start >= end)
			continue;

		/*
		 * Don't confuse VM with a node that doesn't have the
		 * minimum amount of memory:
		 */
		if (end && (end - start) < NODE_MIN_SIZE)
			continue;

		alloc_node_data(nid);
	}
@@ -3,9 +3,11 @@
 * Exceptions for specific devices. Usually work-arounds for fatal design flaws.
 */

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/pci.h>
#include <linux/suspend.h>
#include <linux/vgaarb.h>
#include <asm/amd_nb.h>
#include <asm/hpet.h>

@@ -904,3 +906,60 @@ static void chromeos_fixup_apl_pci_l1ss_capability(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x5ad6, chromeos_save_apl_pci_l1ss_capability);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x5ad6, chromeos_fixup_apl_pci_l1ss_capability);

#ifdef CONFIG_SUSPEND
/*
 * Root Ports on some AMD SoCs advertise PME_Support for D3hot and D3cold, but
 * if the SoC is put into a hardware sleep state by the amd-pmc driver, the
 * Root Ports don't generate wakeup interrupts for USB devices.
 *
 * When suspending, remove D3hot and D3cold from the PME_Support advertised
 * by the Root Port so we don't use those states if we're expecting wakeup
 * interrupts. Restore the advertised PME_Support when resuming.
 */
static void amd_rp_pme_suspend(struct pci_dev *dev)
{
	struct pci_dev *rp;

	/*
	 * PM_SUSPEND_ON means we're doing runtime suspend, which means
	 * amd-pmc will not be involved so PMEs during D3 work as advertised.
	 *
	 * The PMEs *do* work if amd-pmc doesn't put the SoC in the hardware
	 * sleep state, but we assume amd-pmc is always present.
	 */
	if (pm_suspend_target_state == PM_SUSPEND_ON)
		return;

	rp = pcie_find_root_port(dev);
	if (!rp->pm_cap)
		return;

	rp->pme_support &= ~((PCI_PM_CAP_PME_D3hot|PCI_PM_CAP_PME_D3cold) >>
		PCI_PM_CAP_PME_SHIFT);
	dev_info_once(&rp->dev, "quirk: disabling D3cold for suspend\n");
}

static void amd_rp_pme_resume(struct pci_dev *dev)
{
	struct pci_dev *rp;
	u16 pmc;

	rp = pcie_find_root_port(dev);
	if (!rp->pm_cap)
		return;

	pci_read_config_word(rp, rp->pm_cap + PCI_PM_PMC, &pmc);
	rp->pme_support = FIELD_GET(PCI_PM_CAP_PME_MASK, pmc);
}
/* Rembrandt (yellow_carp) */
DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_AMD, 0x162e, amd_rp_pme_suspend);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x162e, amd_rp_pme_resume);
DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_AMD, 0x162f, amd_rp_pme_suspend);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x162f, amd_rp_pme_resume);
/* Phoenix (pink_sardine) */
DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_AMD, 0x1668, amd_rp_pme_suspend);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1668, amd_rp_pme_resume);
DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_AMD, 0x1669, amd_rp_pme_suspend);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1669, amd_rp_pme_resume);
#endif /* CONFIG_SUSPEND */

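The resume fixup above rebuilds pme_support with FIELD_GET() from <linux/bitfield.h>. A hedged, self-contained sketch of that helper (function name is hypothetical): FIELD_GET() shifts and masks a value according to a compile-time constant mask, so the field definition lives in one place.

#include <linux/bitfield.h>
#include <uapi/linux/pci_regs.h>

/* Extract the PME_Support field (bits 15:11) from a PMC register value. */
static inline u16 demo_pme_support(u16 pmc)
{
	return FIELD_GET(PCI_PM_CAP_PME_MASK, pmc);
}

The mask must be a constant expression; FIELD_PREP() is the matching helper for composing a register value from a field.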
@@ -2874,11 +2874,8 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
	};
	struct request *rq;

	if (unlikely(bio_queue_enter(bio)))
		return NULL;

	if (blk_mq_attempt_bio_merge(q, bio, nsegs))
		goto queue_exit;
		return NULL;

	rq_qos_throttle(q, bio);

@@ -2894,35 +2891,23 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
	rq_qos_cleanup(q, bio);
	if (bio->bi_opf & REQ_NOWAIT)
		bio_wouldblock_error(bio);
queue_exit:
	blk_queue_exit(q);
	return NULL;
}

static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
		struct blk_plug *plug, struct bio **bio, unsigned int nsegs)
/* return true if this @rq can be used for @bio */
static bool blk_mq_can_use_cached_rq(struct request *rq, struct blk_plug *plug,
		struct bio *bio)
{
	struct request *rq;
	enum hctx_type type, hctx_type;
	enum hctx_type type = blk_mq_get_hctx_type(bio->bi_opf);
	enum hctx_type hctx_type = rq->mq_hctx->type;

	if (!plug)
		return NULL;
	rq = rq_list_peek(&plug->cached_rq);
	if (!rq || rq->q != q)
		return NULL;
	WARN_ON_ONCE(rq_list_peek(&plug->cached_rq) != rq);

	if (blk_mq_attempt_bio_merge(q, *bio, nsegs)) {
		*bio = NULL;
		return NULL;
	}

	type = blk_mq_get_hctx_type((*bio)->bi_opf);
	hctx_type = rq->mq_hctx->type;
	if (type != hctx_type &&
	    !(type == HCTX_TYPE_READ && hctx_type == HCTX_TYPE_DEFAULT))
		return NULL;
	if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf))
		return NULL;
		return false;
	if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
		return false;

	/*
	 * If any qos ->throttle() end up blocking, we will have flushed the

@@ -2930,12 +2915,12 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
	 * before we throttle.
	 */
	plug->cached_rq = rq_list_next(rq);
	rq_qos_throttle(q, *bio);
	rq_qos_throttle(rq->q, bio);

	blk_mq_rq_time_init(rq, 0);
	rq->cmd_flags = (*bio)->bi_opf;
	rq->cmd_flags = bio->bi_opf;
	INIT_LIST_HEAD(&rq->queuelist);
	return rq;
	return true;
}

static void bio_set_ioprio(struct bio *bio)

@@ -2965,7 +2950,7 @@ void blk_mq_submit_bio(struct bio *bio)
	struct blk_plug *plug = blk_mq_plug(bio);
	const int is_sync = op_is_sync(bio->bi_opf);
	struct blk_mq_hw_ctx *hctx;
	struct request *rq;
	struct request *rq = NULL;
	unsigned int nr_segs = 1;
	blk_status_t ret;

@@ -2976,20 +2961,36 @@ void blk_mq_submit_bio(struct bio *bio)
		return;
	}

	if (!bio_integrity_prep(bio))
		return;

	bio_set_ioprio(bio);

	rq = blk_mq_get_cached_request(q, plug, &bio, nr_segs);
	if (!rq) {
		if (!bio)
	if (plug) {
		rq = rq_list_peek(&plug->cached_rq);
		if (rq && rq->q != q)
			rq = NULL;
	}
	if (rq) {
		if (!bio_integrity_prep(bio))
			return;
		if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
			return;
		if (blk_mq_can_use_cached_rq(rq, plug, bio))
			goto done;
		percpu_ref_get(&q->q_usage_counter);
	} else {
		if (unlikely(bio_queue_enter(bio)))
			return;
		if (!bio_integrity_prep(bio))
			goto fail;
	}

	rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
	if (unlikely(!rq))
	if (unlikely(!rq)) {
fail:
		blk_queue_exit(q);
		return;
	}

done:
	trace_block_getrq(bio);

	rq_qos_track(q, rq, bio);
@@ -117,6 +117,8 @@ static int pcrypt_aead_encrypt(struct aead_request *req)
	err = padata_do_parallel(ictx->psenc, padata, &ctx->cb_cpu);
	if (!err)
		return -EINPROGRESS;
	if (err == -EBUSY)
		return -EAGAIN;

	return err;
}

@@ -164,6 +166,8 @@ static int pcrypt_aead_decrypt(struct aead_request *req)
	err = padata_do_parallel(ictx->psdec, padata, &ctx->cb_cpu);
	if (!err)
		return -EINPROGRESS;
	if (err == -EBUSY)
		return -EAGAIN;

	return err;
}

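The pcrypt hunks translate padata's -EBUSY into -EAGAIN, telling callers to retry rather than treating a full backlog as failure. A hedged caller-side sketch (function name hypothetical) of how an async AEAD user might honour that contract with the standard crypto wait helpers:

#include <linux/crypto.h>
#include <crypto/aead.h>

static int demo_encrypt(struct aead_request *req)
{
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				  crypto_req_done, &wait);
	do {
		/* crypto_wait_req() resolves -EINPROGRESS/-EBUSY by
		 * sleeping until the completion fires; -EAGAIN from a
		 * full parallel queue is retried here. */
		err = crypto_wait_req(crypto_aead_encrypt(req), &wait);
	} while (err == -EAGAIN);

	return err;
}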
@@ -194,12 +194,19 @@ static int fpdt_process_subtable(u64 address, u32 subtable_type)
		record_header = (void *)subtable_header + offset;
		offset += record_header->length;

		if (!record_header->length) {
			pr_err(FW_BUG "Zero-length record found in FPTD.\n");
			result = -EINVAL;
			goto err;
		}

		switch (record_header->type) {
		case RECORD_S3_RESUME:
			if (subtable_type != SUBTABLE_S3PT) {
				pr_err(FW_BUG "Invalid record %d for subtable %s\n",
				       record_header->type, signature);
				return -EINVAL;
				result = -EINVAL;
				goto err;
			}
			if (record_resume) {
				pr_err("Duplicate resume performance record found.\n");

@@ -208,7 +215,7 @@ static int fpdt_process_subtable(u64 address, u32 subtable_type)
			record_resume = (struct resume_performance_record *)record_header;
			result = sysfs_create_group(fpdt_kobj, &resume_attr_group);
			if (result)
				return result;
				goto err;
			break;
		case RECORD_S3_SUSPEND:
			if (subtable_type != SUBTABLE_S3PT) {

@@ -223,13 +230,14 @@ static int fpdt_process_subtable(u64 address, u32 subtable_type)
			record_suspend = (struct suspend_performance_record *)record_header;
			result = sysfs_create_group(fpdt_kobj, &suspend_attr_group);
			if (result)
				return result;
				goto err;
			break;
		case RECORD_BOOT:
			if (subtable_type != SUBTABLE_FBPT) {
				pr_err(FW_BUG "Invalid %d for subtable %s\n",
				       record_header->type, signature);
				return -EINVAL;
				result = -EINVAL;
				goto err;
			}
			if (record_boot) {
				pr_err("Duplicate boot performance record found.\n");

@@ -238,7 +246,7 @@ static int fpdt_process_subtable(u64 address, u32 subtable_type)
			record_boot = (struct boot_performance_record *)record_header;
			result = sysfs_create_group(fpdt_kobj, &boot_attr_group);
			if (result)
				return result;
				goto err;
			break;

		default:

@@ -247,6 +255,18 @@ static int fpdt_process_subtable(u64 address, u32 subtable_type)
		}
	}
	return 0;

err:
	if (record_boot)
		sysfs_remove_group(fpdt_kobj, &boot_attr_group);

	if (record_suspend)
		sysfs_remove_group(fpdt_kobj, &suspend_attr_group);

	if (record_resume)
		sysfs_remove_group(fpdt_kobj, &resume_attr_group);

	return result;
}

static int __init acpi_init_fpdt(void)

@@ -255,6 +275,7 @@ static int __init acpi_init_fpdt(void)
	struct acpi_table_header *header;
	struct fpdt_subtable_entry *subtable;
	u32 offset = sizeof(*header);
	int result;

	status = acpi_get_table(ACPI_SIG_FPDT, 0, &header);

@@ -263,8 +284,8 @@ static int __init acpi_init_fpdt(void)

	fpdt_kobj = kobject_create_and_add("fpdt", acpi_kobj);
	if (!fpdt_kobj) {
		acpi_put_table(header);
		return -ENOMEM;
		result = -ENOMEM;
		goto err_nomem;
	}

	while (offset < header->length) {

@@ -272,8 +293,10 @@ static int __init acpi_init_fpdt(void)
		switch (subtable->type) {
		case SUBTABLE_FBPT:
		case SUBTABLE_S3PT:
			fpdt_process_subtable(subtable->address,
			result = fpdt_process_subtable(subtable->address,
					      subtable->type);
			if (result)
				goto err_subtable;
			break;
		default:
			/* Other types are reserved in ACPI 6.4 spec. */

@@ -282,6 +305,12 @@ static int __init acpi_init_fpdt(void)
		offset += sizeof(*subtable);
	}
	return 0;
err_subtable:
	kobject_put(fpdt_kobj);

err_nomem:
	acpi_put_table(header);
	return result;
}

fs_initcall(acpi_init_fpdt);

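The FPDT changes above are a textbook conversion from early returns to the kernel's goto-unwind idiom: every exit path releases exactly what was already acquired. A minimal self-contained sketch of the idiom (names hypothetical):

#include <linux/kobject.h>
#include <linux/sysfs.h>

static int demo_init(struct kobject *parent, const struct attribute_group *grp)
{
	struct kobject *kobj;
	int result;

	kobj = kobject_create_and_add("demo", parent);
	if (!kobj)
		return -ENOMEM;

	result = sysfs_create_group(kobj, grp);
	if (result)
		goto err_kobj;	/* undo only what already succeeded */

	return 0;

err_kobj:
	kobject_put(kobj);
	return result;
}

The labels unwind in reverse order of acquisition, so a failure midway through setup never leaks the earlier steps.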
@@ -209,6 +209,20 @@ err_pool_alloc:
	return -ENOMEM;
}

/**
 * ghes_estatus_pool_region_free - free previously allocated memory
 * from the ghes_estatus_pool.
 * @addr: address of memory to free.
 * @size: size of memory to free.
 *
 * Returns none.
 */
void ghes_estatus_pool_region_free(unsigned long addr, u32 size)
{
	gen_pool_free(ghes_estatus_pool, addr, size);
}
EXPORT_SYMBOL_GPL(ghes_estatus_pool_region_free);

static int map_gen_v2(struct ghes *ghes)
{
	return apei_map_generic_address(&ghes->generic_v2->read_ack_register);

@@ -564,6 +578,7 @@ static void ghes_handle_aer(struct acpi_hest_generic_data *gdata)
	    pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) {
		unsigned int devfn;
		int aer_severity;
		u8 *aer_info;

		devfn = PCI_DEVFN(pcie_err->device_id.device,
				  pcie_err->device_id.function);

@@ -577,11 +592,17 @@ static void ghes_handle_aer(struct acpi_hest_generic_data *gdata)
		if (gdata->flags & CPER_SEC_RESET)
			aer_severity = AER_FATAL;

		aer_info = (void *)gen_pool_alloc(ghes_estatus_pool,
						  sizeof(struct aer_capability_regs));
		if (!aer_info)
			return;
		memcpy(aer_info, pcie_err->aer_info, sizeof(struct aer_capability_regs));

		aer_recover_queue(pcie_err->device_id.segment,
				  pcie_err->device_id.bus,
				  devfn, aer_severity,
				  (struct aer_capability_regs *)
				  pcie_err->aer_info);
				  aer_info);
	}
#endif
}
@@ -1924,6 +1924,16 @@ static const struct dmi_system_id ec_dmi_table[] __initconst = {
			DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Gaming Laptop 15-dk1xxx"),
		},
	},
	{
		/*
		 * HP 250 G7 Notebook PC
		 */
		.callback = ec_honor_dsdt_gpe,
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "HP 250 G7 Notebook PC"),
		},
	},
	{
		/*
		 * Samsung hardware

@@ -495,6 +495,18 @@ static const struct dmi_system_id maingear_laptop[] = {
			DMI_MATCH(DMI_PRODUCT_NAME, "MG-VCP2-15A3070T"),
		}
	},
	{
		/* TongFang GMxXGxx/TUXEDO Polaris 15 Gen5 AMD */
		.matches = {
			DMI_MATCH(DMI_BOARD_NAME, "GMxXGxx"),
		},
	},
	{
		/* TongFang GM6XGxX/TUXEDO Stellaris 16 Gen5 AMD */
		.matches = {
			DMI_MATCH(DMI_BOARD_NAME, "GM6XGxX"),
		},
	},
	{
		.ident = "MAINGEAR Vector Pro 2 17",
		.matches = {
@@ -2291,19 +2291,21 @@ static int get_esi(struct atm_dev *dev)
static int reset_sar(struct atm_dev *dev)
{
	IADEV *iadev;
	int i, error = 1;
	int i, error;
	unsigned int pci[64];

	iadev = INPH_IA_DEV(dev);
	for(i=0; i<64; i++)
		if ((error = pci_read_config_dword(iadev->pci,
				i*4, &pci[i])) != PCIBIOS_SUCCESSFUL)
	for (i = 0; i < 64; i++) {
		error = pci_read_config_dword(iadev->pci, i * 4, &pci[i]);
		if (error != PCIBIOS_SUCCESSFUL)
			return error;
	}
	writel(0, iadev->reg+IPHASE5575_EXT_RESET);
	for(i=0; i<64; i++)
		if ((error = pci_write_config_dword(iadev->pci,
				i*4, pci[i])) != PCIBIOS_SUCCESSFUL)
	for (i = 0; i < 64; i++) {
		error = pci_write_config_dword(iadev->pci, i * 4, pci[i]);
		if (error != PCIBIOS_SUCCESSFUL)
			return error;
	}
	udelay(5);
	return 0;
}

@@ -1274,8 +1274,8 @@ static void __device_release_driver(struct device *dev, struct device *parent)
	if (dev->bus && dev->bus->dma_cleanup)
		dev->bus->dma_cleanup(dev);

	device_links_driver_cleanup(dev);
	device_unbind_cleanup(dev);
	device_links_driver_cleanup(dev);

	klist_remove(&dev->p->knode_driver);
	device_pm_check_callbacks(dev);
@@ -334,6 +334,11 @@ static int regcache_default_sync(struct regmap *map, unsigned int min,
	return 0;
}

static int rbtree_all(const void *key, const struct rb_node *node)
{
	return 0;
}

/**
 * regcache_sync - Sync the register cache with the hardware.
 *

@@ -351,6 +356,7 @@ int regcache_sync(struct regmap *map)
	unsigned int i;
	const char *name;
	bool bypass;
	struct rb_node *node;

	if (WARN_ON(map->cache_type == REGCACHE_NONE))
		return -EINVAL;

@@ -392,6 +398,30 @@ out:
	/* Restore the bypass state */
	map->cache_bypass = bypass;
	map->no_sync_defaults = false;

	/*
	 * If we did any paging with cache bypassed and a cached
	 * paging register then the register and cache state might
	 * have gone out of sync, force writes of all the paging
	 * registers.
	 */
	rb_for_each(node, 0, &map->range_tree, rbtree_all) {
		struct regmap_range_node *this =
			rb_entry(node, struct regmap_range_node, node);

		/* If there's nothing in the cache there's nothing to sync */
		ret = regcache_read(map, this->selector_reg, &i);
		if (ret != 0)
			continue;

		ret = _regmap_write(map, this->selector_reg, i);
		if (ret != 0) {
			dev_err(map->dev, "Failed to write %x = %x: %d\n",
				this->selector_reg, i, ret);
			break;
		}
	}

	map->unlock(map->lock_arg);

	regmap_async_complete(map);

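The rbtree_all() trick above is worth calling out: rb_for_each() normally walks only nodes matching a key, so a comparator that always returns 0 turns it into a full-tree iteration. A hedged, self-contained sketch of the pattern (names hypothetical):

#include <linux/rbtree.h>

/* Matching every node makes rb_for_each() visit the whole tree. */
static int demo_match_all(const void *key, const struct rb_node *node)
{
	return 0;
}

static void demo_walk(struct rb_root *root)
{
	struct rb_node *node;

	rb_for_each(node, NULL, root, demo_match_all) {
		/* use rb_entry(node, struct your_type, member) here */
	}
}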
@@ -1313,6 +1313,7 @@ static int virtblk_probe(struct virtio_device *vdev)
	u16 min_io_size;
	u8 physical_block_exp, alignment_offset;
	unsigned int queue_depth;
	size_t max_dma_size;

	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",

@@ -1411,7 +1412,8 @@ static int virtblk_probe(struct virtio_device *vdev)
	/* No real sector limit. */
	blk_queue_max_hw_sectors(q, UINT_MAX);

	max_size = virtio_max_dma_size(vdev);
	max_dma_size = virtio_max_dma_size(vdev);
	max_size = max_dma_size > U32_MAX ? U32_MAX : max_dma_size;

	/* Host can optionally specify maximum segment size and number of
	 * segments. */

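The virtio-blk fix avoids silently truncating a 64-bit size_t into a u32 segment limit. As a hedged aside, min_t() expresses the same clamp without the open-coded ternary (function name hypothetical):

#include <linux/minmax.h>
#include <linux/limits.h>
#include <linux/types.h>

static inline u32 demo_clamp_dma_size(size_t max_dma_size)
{
	/* On 32-bit kernels size_t already fits; on 64-bit this caps
	 * the value at U32_MAX instead of wrapping on assignment. */
	return min_t(u64, max_dma_size, U32_MAX);
}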
@@ -2803,6 +2803,9 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev,
		goto err_free_wc;
	}

	if (data->evt_skb == NULL)
		goto err_free_wc;

	/* Parse and handle the return WMT event */
	wmt_evt = (struct btmtk_hci_wmt_evt *)data->evt_skb->data;
	if (wmt_evt->whdr.op != hdr->op) {
@@ -38,7 +38,7 @@ static struct _parisc_agp_info {

	int lba_cap_offset;

	u64 *gatt;
	__le64 *gatt;
	u64 gatt_entries;

	u64 gart_base;

@@ -104,7 +104,7 @@ parisc_agp_create_gatt_table(struct agp_bridge_data *bridge)
	int i;

	for (i = 0; i < info->gatt_entries; i++) {
		info->gatt[i] = (unsigned long)agp_bridge->scratch_page;
		info->gatt[i] = cpu_to_le64(agp_bridge->scratch_page);
	}

	return 0;

@@ -158,9 +158,9 @@ parisc_agp_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
		for (k = 0;
		     k < info->io_pages_per_kpage;
		     k++, j++, paddr += info->io_page_size) {
			info->gatt[j] =
			info->gatt[j] = cpu_to_le64(
				parisc_agp_mask_memory(agp_bridge,
					paddr, type);
					paddr, type));
			asm_io_fdc(&info->gatt[j]);
		}
	}

@@ -184,7 +184,7 @@ parisc_agp_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
	io_pg_start = info->io_pages_per_kpage * pg_start;
	io_pg_count = info->io_pages_per_kpage * mem->page_count;
	for (i = io_pg_start; i < io_pg_count + io_pg_start; i++) {
		info->gatt[i] = agp_bridge->scratch_page;
		info->gatt[i] = cpu_to_le64(agp_bridge->scratch_page);
	}

	agp_bridge->driver->tlb_flush(mem);

@@ -204,7 +204,8 @@ parisc_agp_mask_memory(struct agp_bridge_data *bridge, dma_addr_t addr,
	pa |= (ci >> PAGE_SHIFT) & 0xff;/* move CI (8 bits) into lowest byte */
	pa |= SBA_PDIR_VALID_BIT;	/* set "valid" bit */

	return cpu_to_le64(pa);
	/* return native (big-endian) PDIR entry */
	return pa;
}

static void

@@ -251,7 +252,8 @@ static int __init
agp_ioc_init(void __iomem *ioc_regs)
{
	struct _parisc_agp_info *info = &parisc_agp_info;
	u64 iova_base, *io_pdir, io_tlb_ps;
	u64 iova_base, io_tlb_ps;
	__le64 *io_pdir;
	int io_tlb_shift;

	printk(KERN_INFO DRVPFX "IO PDIR shared with sba_iommu\n");

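The parisc-agp change types the GATT as __le64 so sparse can catch endianness bugs: the big-endian CPU must byte-swap each entry with cpu_to_le64() before the device reads it. A hedged, self-contained sketch of the pattern (names hypothetical):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Fill a device-visible little-endian descriptor table. */
static void demo_fill_pdir(__le64 *pdir, u64 entry, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		pdir[i] = cpu_to_le64(entry);	/* swaps on big-endian CPUs */
}

Assigning a plain u64 to a __le64 slot would be flagged by sparse, which is exactly the safety net the annotation buys.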
@@ -73,7 +73,6 @@ static struct clk_fixed_factor gpll0_out_main_div2 = {
			&gpll0_main.clkr.hw },
		.num_parents = 1,
		.ops = &clk_fixed_factor_ops,
		.flags = CLK_SET_RATE_PARENT,
	},
};

@@ -87,7 +86,6 @@ static struct clk_alpha_pll_postdiv gpll0 = {
			&gpll0_main.clkr.hw },
		.num_parents = 1,
		.ops = &clk_alpha_pll_postdiv_ro_ops,
		.flags = CLK_SET_RATE_PARENT,
	},
};

@@ -162,7 +160,6 @@ static struct clk_alpha_pll_postdiv gpll6 = {
			&gpll6_main.clkr.hw },
		.num_parents = 1,
		.ops = &clk_alpha_pll_postdiv_ro_ops,
		.flags = CLK_SET_RATE_PARENT,
	},
};

@@ -193,7 +190,6 @@ static struct clk_alpha_pll_postdiv gpll4 = {
			&gpll4_main.clkr.hw },
		.num_parents = 1,
		.ops = &clk_alpha_pll_postdiv_ro_ops,
		.flags = CLK_SET_RATE_PARENT,
	},
};

@@ -244,7 +240,6 @@ static struct clk_alpha_pll_postdiv gpll2 = {
			&gpll2_main.clkr.hw },
		.num_parents = 1,
		.ops = &clk_alpha_pll_postdiv_ro_ops,
		.flags = CLK_SET_RATE_PARENT,
	},
};

@@ -275,7 +270,6 @@ static struct clk_alpha_pll_postdiv nss_crypto_pll = {
			&nss_crypto_pll_main.clkr.hw },
		.num_parents = 1,
		.ops = &clk_alpha_pll_postdiv_ro_ops,
		.flags = CLK_SET_RATE_PARENT,
	},
};

@@ -76,7 +76,6 @@ static struct clk_fixed_factor gpll0_out_main_div2 = {
			&gpll0_main.clkr.hw },
		.num_parents = 1,
		.ops = &clk_fixed_factor_ops,
		.flags = CLK_SET_RATE_PARENT,
	},
};

@@ -122,7 +121,6 @@ static struct clk_alpha_pll_postdiv gpll2 = {
			&gpll2_main.clkr.hw },
		.num_parents = 1,
		.ops = &clk_alpha_pll_postdiv_ro_ops,
		.flags = CLK_SET_RATE_PARENT,
	},
};

@@ -155,7 +153,6 @@ static struct clk_alpha_pll_postdiv gpll4 = {
			&gpll4_main.clkr.hw },
		.num_parents = 1,
		.ops = &clk_alpha_pll_postdiv_ro_ops,
		.flags = CLK_SET_RATE_PARENT,
	},
};

@@ -189,7 +186,6 @@ static struct clk_alpha_pll_postdiv gpll6 = {
			&gpll6_main.clkr.hw },
		.num_parents = 1,
		.ops = &clk_alpha_pll_postdiv_ro_ops,
		.flags = CLK_SET_RATE_PARENT,
	},
};

@@ -202,7 +198,6 @@ static struct clk_fixed_factor gpll6_out_main_div2 = {
			&gpll6_main.clkr.hw },
		.num_parents = 1,
		.ops = &clk_fixed_factor_ops,
		.flags = CLK_SET_RATE_PARENT,
	},
};

@@ -267,7 +262,6 @@ static struct clk_alpha_pll_postdiv nss_crypto_pll = {
			&nss_crypto_pll_main.clkr.hw },
		.num_parents = 1,
		.ops = &clk_alpha_pll_postdiv_ro_ops,
		.flags = CLK_SET_RATE_PARENT,
	},
};

@@ -7,8 +7,10 @@
#define __STRATIX10_CLK_H

struct stratix10_clock_data {
	struct clk_hw_onecell_data clk_data;
	void __iomem *base;

	/* Must be last */
	struct clk_hw_onecell_data clk_data;
};

struct stratix10_pll_clock {
@@ -15,8 +15,10 @@

struct visconti_pll_provider {
	void __iomem *reg_base;
	struct clk_hw_onecell_data clk_data;
	struct device_node *node;

	/* Must be last */
	struct clk_hw_onecell_data clk_data;
};

#define VISCONTI_PLL_RATE(_rate, _dacen, _dsmen, \

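The "Must be last" moves above exist because clk_hw_onecell_data ends in a flexible array member (hws[]); anything placed after it in the containing struct would be overwritten by the array. A hedged, self-contained sketch of how such a struct is sized and allocated (struct and function names hypothetical):

#include <linux/clk-provider.h>
#include <linux/overflow.h>
#include <linux/slab.h>

struct demo_provider {
	void __iomem *reg_base;
	/* Must be last: clk_data ends in a flexible hws[] array. */
	struct clk_hw_onecell_data clk_data;
};

static struct demo_provider *demo_alloc(unsigned int num)
{
	struct demo_provider *ctx;

	/* struct_size() accounts for the trailing hws[num] elements. */
	ctx = kzalloc(struct_size(ctx, clk_data.hws, num), GFP_KERNEL);
	if (ctx)
		ctx->clk_data.num = num;
	return ctx;
}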
@@ -315,6 +315,7 @@ static void __init tcb_setup_dual_chan(struct atmel_tc *tc, int mck_divisor_idx)
	writel(mck_divisor_idx			/* likely divide-by-8 */
			| ATMEL_TC_WAVE
			| ATMEL_TC_WAVESEL_UP		/* free-run */
			| ATMEL_TC_ASWTRG_SET		/* TIOA0 rises at software trigger */
			| ATMEL_TC_ACPA_SET		/* TIOA0 rises at 0 */
			| ATMEL_TC_ACPC_CLEAR,		/* (duty cycle 50%) */
			tcaddr + ATMEL_TC_REG(0, CMR));

@@ -434,12 +434,16 @@ static int __init mxc_timer_init_dt(struct device_node *np, enum imx_gpt_type t
		return -ENOMEM;

	imxtm->base = of_iomap(np, 0);
	if (!imxtm->base)
		return -ENXIO;
	if (!imxtm->base) {
		ret = -ENXIO;
		goto err_kfree;
	}

	imxtm->irq = irq_of_parse_and_map(np, 0);
	if (imxtm->irq <= 0)
		return -EINVAL;
	if (imxtm->irq <= 0) {
		ret = -EINVAL;
		goto err_kfree;
	}

	imxtm->clk_ipg = of_clk_get_by_name(np, "ipg");

@@ -452,11 +456,15 @@ static int __init mxc_timer_init_dt(struct device_node *np, enum imx_gpt_type t

	ret = _mxc_timer_init(imxtm);
	if (ret)
		return ret;
		goto err_kfree;

	initialized = 1;

	return 0;

err_kfree:
	kfree(imxtm);
	return ret;
}

static int __init imx1_timer_init_dt(struct device_node *np)
@@ -131,23 +131,23 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
	len += sysfs_emit_at(buf, len, "   From  :    To\n");
	len += sysfs_emit_at(buf, len, "         : ");
	for (i = 0; i < stats->state_num; i++) {
		if (len >= PAGE_SIZE)
		if (len >= PAGE_SIZE - 1)
			break;
		len += sysfs_emit_at(buf, len, "%9u ", stats->freq_table[i]);
	}
	if (len >= PAGE_SIZE)
		return PAGE_SIZE;
	if (len >= PAGE_SIZE - 1)
		return PAGE_SIZE - 1;

	len += sysfs_emit_at(buf, len, "\n");

	for (i = 0; i < stats->state_num; i++) {
		if (len >= PAGE_SIZE)
		if (len >= PAGE_SIZE - 1)
			break;

		len += sysfs_emit_at(buf, len, "%9u: ", stats->freq_table[i]);

		for (j = 0; j < stats->state_num; j++) {
			if (len >= PAGE_SIZE)
			if (len >= PAGE_SIZE - 1)
				break;

			if (pending)

@@ -157,12 +157,12 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)

			len += sysfs_emit_at(buf, len, "%9u ", count);
		}
		if (len >= PAGE_SIZE)
		if (len >= PAGE_SIZE - 1)
			break;
		len += sysfs_emit_at(buf, len, "\n");
	}

	if (len >= PAGE_SIZE) {
	if (len >= PAGE_SIZE - 1) {
		pr_warn_once("cpufreq transition table exceeds PAGE_SIZE. Disabling\n");
		return -EFBIG;
	}

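The PAGE_SIZE - 1 bound matters because sysfs_emit_at() reserves the last byte of the page and warns (returning 0) if asked to write at an offset of PAGE_SIZE - 1 or beyond. A hedged sketch of a show() callback that respects the bound (names hypothetical):

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t demo_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	ssize_t len = 0;
	int i;

	/* Stop appending before the offset becomes invalid. */
	for (i = 0; i < 4 && len < PAGE_SIZE - 1; i++)
		len += sysfs_emit_at(buf, len, "%d ", i);
	len += sysfs_emit_at(buf, len, "\n");

	return len;
}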
@@ -845,6 +845,8 @@ static void qm_poll_req_cb(struct hisi_qp *qp)
		qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ,
		      qp->qp_status.cq_head, 0);
		atomic_dec(&qp->qp_status.used);

		cond_resched();
	}

	/* set c_flag */

@@ -1242,35 +1242,39 @@ static struct device *grandparent(struct device *dev)
	return NULL;
}

static struct device *endpoint_host(struct cxl_port *endpoint)
{
	struct cxl_port *port = to_cxl_port(endpoint->dev.parent);

	if (is_cxl_root(port))
		return port->uport_dev;
	return &port->dev;
}

static void delete_endpoint(void *data)
{
	struct cxl_memdev *cxlmd = data;
	struct cxl_port *endpoint = cxlmd->endpoint;
	struct cxl_port *parent_port;
	struct device *parent;
	struct device *host = endpoint_host(endpoint);

	parent_port = cxl_mem_find_port(cxlmd, NULL);
	if (!parent_port)
		goto out;
	parent = &parent_port->dev;

	device_lock(parent);
	if (parent->driver && !endpoint->dead) {
		devm_release_action(parent, cxl_unlink_parent_dport, endpoint);
		devm_release_action(parent, cxl_unlink_uport, endpoint);
		devm_release_action(parent, unregister_port, endpoint);
	device_lock(host);
	if (host->driver && !endpoint->dead) {
		devm_release_action(host, cxl_unlink_parent_dport, endpoint);
		devm_release_action(host, cxl_unlink_uport, endpoint);
		devm_release_action(host, unregister_port, endpoint);
	}
	cxlmd->endpoint = NULL;
	device_unlock(parent);
	put_device(parent);
out:
	device_unlock(host);
	put_device(&endpoint->dev);
	put_device(host);
}

int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint)
{
	struct device *host = endpoint_host(endpoint);
	struct device *dev = &cxlmd->dev;

	get_device(host);
	get_device(&endpoint->dev);
	cxlmd->endpoint = endpoint;
	cxlmd->depth = endpoint->depth;
@@ -1127,7 +1127,14 @@ static int cxl_port_setup_targets(struct cxl_port *port,
	}

	if (is_cxl_root(parent_port)) {
		parent_ig = cxlrd->cxlsd.cxld.interleave_granularity;
		/*
		 * Root decoder IG is always set to value in CFMWS which
		 * may be different than this region's IG. We can use the
		 * region's IG here since interleave_granularity_store()
		 * does not allow interleaved host-bridges with
		 * root IG != region IG.
		 */
		parent_ig = p->interleave_granularity;
		parent_iw = cxlrd->cxlsd.cxld.interleave_ways;
		/*
		 * For purposes of address bit routing, use power-of-2 math for

@@ -1676,6 +1683,12 @@ static int cxl_region_attach(struct cxl_region *cxlr,
		return -ENXIO;
	}

	if (p->nr_targets >= p->interleave_ways) {
		dev_dbg(&cxlr->dev, "region already has %d endpoints\n",
			p->nr_targets);
		return -EINVAL;
	}

	ep_port = cxled_to_port(cxled);
	root_port = cxlrd_to_port(cxlrd);
	dport = cxl_find_dport_by_dev(root_port, ep_port->host_bridge);

@@ -1768,7 +1781,7 @@ static int cxl_region_attach(struct cxl_region *cxlr,
	if (p->nr_targets == p->interleave_ways) {
		rc = cxl_region_setup_targets(cxlr);
		if (rc)
			goto err_decrement;
			return rc;
		p->state = CXL_CONFIG_ACTIVE;
	}

@@ -1800,12 +1813,6 @@ static int cxl_region_attach(struct cxl_region *cxlr,
	}

	return 0;

err_decrement:
	p->nr_targets--;
	cxled->pos = -1;
	p->targets[pos] = NULL;
	return rc;
}

static int cxl_region_detach(struct cxl_endpoint_decoder *cxled)

@@ -490,7 +490,7 @@ static int stm32_mdma_set_xfer_param(struct stm32_mdma_chan *chan,
	src_maxburst = chan->dma_config.src_maxburst;
	dst_maxburst = chan->dma_config.dst_maxburst;

	ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id));
	ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)) & ~STM32_MDMA_CCR_EN;
	ctcr = stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id));
	ctbr = stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id));

@@ -966,7 +966,7 @@ stm32_mdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest, dma_addr_t src,
	if (!desc)
		return NULL;

	ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id));
	ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)) & ~STM32_MDMA_CCR_EN;
	ctcr = stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id));
	ctbr = stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id));
	cbndtr = stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id));
@@ -171,6 +171,12 @@ static enum qcom_scm_convention __get_convention(void)
	if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
		return qcom_scm_convention;

	/*
	 * Per the "SMC calling convention specification", the 64-bit calling
	 * convention can only be used when the client is 64-bit, otherwise
	 * system will encounter the undefined behaviour.
	 */
#if IS_ENABLED(CONFIG_ARM64)
	/*
	 * Device isn't required as there is only one argument - no device
	 * needed to dma_map_single to secure world

@@ -191,6 +197,7 @@ static enum qcom_scm_convention __get_convention(void)
		forced = true;
		goto found;
	}
#endif

	probed_convention = SMC_CONVENTION_ARM_32;
	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);

@@ -1655,6 +1655,26 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
			.ignore_wake = "SYNA1202:00@16",
		},
	},
	{
		/*
		 * On the Peaq C1010 2-in-1 INT33FC:00 pin 3 is connected to
		 * a "dolby" button. At the ACPI level an _AEI event-handler
		 * is connected which sets an ACPI variable to 1 on both
		 * edges. This variable can be polled + cleared to 0 using
		 * WMI. But since the variable is set on both edges the WMI
		 * interface is pretty useless even when polling.
		 * So instead the x86-android-tablets code instantiates
		 * a gpio-keys platform device for it.
		 * Ignore the _AEI handler for the pin, so that it is not busy.
		 */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "PEAQ"),
			DMI_MATCH(DMI_PRODUCT_NAME, "PEAQ PMM C1010 MD99187"),
		},
		.driver_data = &(struct acpi_gpiolib_dmi_quirk) {
			.ignore_interrupt = "INT33FC:00@3",
		},
	},
	{} /* Terminating entry */
};

@@ -496,6 +496,10 @@ static struct gpio_desc *of_find_gpio_rename(struct device_node *np,
#if IS_ENABLED(CONFIG_SND_SOC_CS42L56)
	{ "reset",	"cirrus,gpio-nreset",	"cirrus,cs42l56" },
#endif
#if IS_ENABLED(CONFIG_SND_SOC_MT2701_CS42448)
	{ "i2s1-in-sel-gpio1",	NULL,	"mediatek,mt2701-cs42448-machine" },
	{ "i2s1-in-sel-gpio2",	NULL,	"mediatek,mt2701-cs42448-machine" },
#endif
#if IS_ENABLED(CONFIG_SND_SOC_TLV320AIC3X)
	{ "reset",	"gpio-reset",	"ti,tlv320aic3x" },
	{ "reset",	"gpio-reset",	"ti,tlv320aic33" },
@@ -29,6 +29,7 @@
#include "amdgpu.h"
#include "atom.h"

#include <linux/device.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/acpi.h>

@@ -287,6 +288,10 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
	if (adev->flags & AMD_IS_APU)
		return false;

	/* ATRM is for on-platform devices only */
	if (dev_is_removable(&adev->pdev->dev))
		return false;

	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
		dhandle = ACPI_HANDLE(&pdev->dev);
		if (!dhandle)

@@ -179,6 +179,7 @@ int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
	}

	rcu_read_unlock();
	*result = NULL;
	return -ENOENT;
}

@@ -1438,7 +1438,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
		if (r == -ENOMEM)
			DRM_ERROR("Not enough memory for command submission!\n");
		else if (r != -ERESTARTSYS && r != -EAGAIN)
			DRM_ERROR("Failed to process the buffer list %d!\n", r);
			DRM_DEBUG("Failed to process the buffer list %d!\n", r);
		goto error_fini;
	}

@@ -747,6 +747,9 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
	ssize_t result = 0;
	int r;

	if (!adev->smc_rreg)
		return -EPERM;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

@@ -803,6 +806,9 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
	ssize_t result = 0;
	int r;

	if (!adev->smc_wreg)
		return -EPERM;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

@@ -43,6 +43,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/device.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>

@@ -2233,7 +2234,6 @@ out:
 */
static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct pci_dev *parent;
	int i, r;
	bool total;

@@ -2304,7 +2304,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
	    (amdgpu_is_atpx_hybrid() ||
	     amdgpu_has_atpx_dgpu_power_cntl()) &&
	    ((adev->flags & AMD_IS_APU) == 0) &&
	    !pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
	    !dev_is_removable(&adev->pdev->dev))
		adev->flags |= AMD_IS_PX;

	if (!(adev->flags & AMD_IS_APU)) {

@@ -2318,6 +2318,8 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
	if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
		adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
	if (!amdgpu_device_pcie_dynamic_switching_supported())
		adev->pm.pp_feature &= ~PP_PCIE_DPM_MASK;

	total = true;
	for (i = 0; i < adev->num_ip_blocks; i++) {

@@ -4130,7 +4132,7 @@ fence_driver_init:

	px = amdgpu_device_supports_px(ddev);

	if (px || (!pci_is_thunderbolt_attached(adev->pdev) &&
	if (px || (!dev_is_removable(&adev->pdev->dev) &&
		   apple_gmux_detect(NULL, NULL)))
		vga_switcheroo_register_client(adev->pdev,
					       &amdgpu_switcheroo_ops, px);

@@ -4276,7 +4278,7 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)

	px = amdgpu_device_supports_px(adev_to_drm(adev));

	if (px || (!pci_is_thunderbolt_attached(adev->pdev) &&
	if (px || (!dev_is_removable(&adev->pdev->dev) &&
		   apple_gmux_detect(NULL, NULL)))
		vga_switcheroo_unregister_client(adev->pdev);

@@ -5399,7 +5401,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
	 * Flush RAM to disk so that after reboot
	 * the user can read log and see why the system rebooted.
	 */
	if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
	if (need_emergency_restart && amdgpu_ras_get_context(adev) &&
	    amdgpu_ras_get_context(adev)->reboot) {
		DRM_WARN("Emergency reboot.");

		ksys_sync_helper();
@@ -92,6 +92,7 @@
MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY);

#define mmRCC_CONFIG_MEMSIZE	0xde3
#define mmMP0_SMN_C2PMSG_33	0x16061
#define mmMM_INDEX		0x0
#define mmMM_INDEX_HI		0x6
#define mmMM_DATA		0x1

@@ -230,8 +231,26 @@ static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev,
static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
						 uint8_t *binary)
{
	uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
	int ret = 0;
	uint64_t vram_size;
	u32 msg;
	int i, ret = 0;

	/* It can take up to a second for IFWI init to complete on some dGPUs,
	 * but generally it should be in the 60-100ms range. Normally this starts
	 * as soon as the device gets power so by the time the OS loads this has long
	 * completed. However, when a card is hotplugged via e.g., USB4, we need to
	 * wait for this to complete. Once the C2PMSG is updated, we can
	 * continue.
	 */
	if (dev_is_removable(&adev->pdev->dev)) {
		for (i = 0; i < 1000; i++) {
			msg = RREG32(mmMP0_SMN_C2PMSG_33);
			if (msg & 0x80000000)
				break;
			msleep(1);
		}
	}
	vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;

	if (vram_size) {
		uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;

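The bounded wait loop above (poll a ready bit, sleep, give up after ~1 s) can also be written with the generic read_poll_timeout() helper from <linux/iopoll.h>. A hedged sketch under the assumption that the surrounding driver's RREG32() macro and register define are in scope (the function name is hypothetical, not part of the patch):

#include <linux/iopoll.h>

static int demo_wait_for_ifwi(struct amdgpu_device *adev)
{
	u32 msg;

	/* Poll every 1000 us, time out after 1,000,000 us (one second);
	 * returns 0 on success or -ETIMEDOUT. */
	return read_poll_timeout(RREG32, msg, msg & 0x80000000,
				 1000, 1000000, false, mmMP0_SMN_C2PMSG_33);
}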
@@ -627,8 +627,20 @@ static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev,
	mqd_prop.hqd_queue_priority = p->hqd_queue_priority;
	mqd_prop.hqd_active = false;

	if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
	    p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		mutex_lock(&adev->srbm_mutex);
		amdgpu_gfx_select_me_pipe_q(adev, p->ring->me, p->ring->pipe, 0, 0, 0);
	}

	mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop);

	if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
	    p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	}

	amdgpu_bo_unreserve(q->mqd_obj);
}

@@ -1062,9 +1074,13 @@ int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
	switch (queue_type) {
	case AMDGPU_RING_TYPE_GFX:
		ring->funcs = adev->gfx.gfx_ring[0].funcs;
		ring->me = adev->gfx.gfx_ring[0].me;
		ring->pipe = adev->gfx.gfx_ring[0].pipe;
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		ring->funcs = adev->gfx.compute_ring[0].funcs;
		ring->me = adev->gfx.compute_ring[0].me;
		ring->pipe = adev->gfx.compute_ring[0].pipe;
		break;
	case AMDGPU_RING_TYPE_SDMA:
		ring->funcs = adev->sdma.instance[0].ring.funcs;

@@ -1380,6 +1380,7 @@ static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	if (adev->dev->kobj.sd)
		sysfs_remove_file_from_group(&adev->dev->kobj,
					     &con->badpages_attr.attr,
					     RAS_FS_NAME);

@@ -1397,6 +1398,7 @@ static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev)
		.attrs = attrs,
	};

	if (adev->dev->kobj.sd)
		sysfs_remove_group(&adev->dev->kobj, &group);

	return 0;

@@ -1444,6 +1446,7 @@ int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
	if (!obj || !obj->attr_inuse)
		return -EINVAL;

	if (adev->dev->kobj.sd)
		sysfs_remove_file_from_group(&adev->dev->kobj,
					     &obj->sysfs_attr.attr,
					     RAS_FS_NAME);

@@ -292,8 +292,15 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
	void *ptr;
	int i, idx;

	bool in_ras_intr = amdgpu_ras_intr_triggered();

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	/* err_event_athub will corrupt VCPU buffer, so we need to
	 * restore fw data and clear buffer in amdgpu_vcn_resume() */
	if (in_ras_intr)
		return 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

@@ -239,6 +239,8 @@ static int amdgpu_vkms_conn_get_modes(struct drm_connector *connector)

	for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
		mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
		if (!mode)
			continue;
		drm_mode_probed_add(connector, mode);
	}

@@ -1099,8 +1099,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
			bo = gem_to_amdgpu_bo(gobj);
		}
		mem = bo->tbo.resource;
		if (mem->mem_type == TTM_PL_TT ||
		    mem->mem_type == AMDGPU_PL_PREEMPT)
		if (mem && (mem->mem_type == TTM_PL_TT ||
			    mem->mem_type == AMDGPU_PL_PREEMPT))
			pages_addr = bo->tbo.ttm->dma_address;
	}

@@ -2129,7 +2129,8 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
 * Returns:
 * 0 for success, error for failure.
 */
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id)
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		   int32_t xcp_id)
{
	struct amdgpu_bo *root_bo;
	struct amdgpu_bo_vm *root;

@@ -2148,6 +2149,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp
	INIT_LIST_HEAD(&vm->done);
	INIT_LIST_HEAD(&vm->pt_freed);
	INIT_WORK(&vm->pt_free_work, amdgpu_vm_pt_free_work);
	INIT_KFIFO(vm->faults);

	r = amdgpu_vm_init_entities(adev, vm);
	if (r)

@@ -2182,34 +2184,33 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp
				false, &root, xcp_id);
	if (r)
		goto error_free_delayed;
	root_bo = &root->bo;

	root_bo = amdgpu_bo_ref(&root->bo);
	r = amdgpu_bo_reserve(root_bo, true);
	if (r) {
		amdgpu_bo_unref(&root->shadow);
		amdgpu_bo_unref(&root_bo);
		goto error_free_delayed;
	}

	amdgpu_vm_bo_base_init(&vm->root, vm, root_bo);
	r = dma_resv_reserve_fences(root_bo->tbo.base.resv, 1);
	if (r)
		goto error_free_root;

	r = dma_resv_reserve_fences(root_bo->tbo.base.resv, 1);
	if (r)
		goto error_unreserve;

	amdgpu_vm_bo_base_init(&vm->root, vm, root_bo);

	r = amdgpu_vm_pt_clear(adev, vm, root, false);
	if (r)
		goto error_unreserve;
		goto error_free_root;

	amdgpu_bo_unreserve(vm->root.bo);

	INIT_KFIFO(vm->faults);
	amdgpu_bo_unref(&root_bo);

	return 0;

error_unreserve:
	amdgpu_bo_unreserve(vm->root.bo);

error_free_root:
	amdgpu_bo_unref(&root->shadow);
	amdgpu_vm_pt_free_root(adev, vm);
	amdgpu_bo_unreserve(vm->root.bo);
	amdgpu_bo_unref(&root_bo);
	vm->root.bo = NULL;

error_free_delayed:
	dma_fence_put(vm->last_tlb_flush);

@ -28,6 +28,7 @@
|
|||
#include "nbio/nbio_2_3_offset.h"
|
||||
#include "nbio/nbio_2_3_sh_mask.h"
|
||||
#include <uapi/linux/kfd_ioctl.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/pci.h>
|
||||
|
||||
#define smnPCIE_CONFIG_CNTL 0x11180044
|
||||
|
@ -361,7 +362,7 @@ static void nbio_v2_3_enable_aspm(struct amdgpu_device *adev,
|
|||
|
||||
data |= NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT;
|
||||
|
||||
if (pci_is_thunderbolt_attached(adev->pdev))
|
||||
if (dev_is_removable(&adev->pdev->dev))
|
||||
data |= NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
|
||||
else
|
||||
data |= NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
|
||||
|
@ -480,7 +481,7 @@ static void nbio_v2_3_program_aspm(struct amdgpu_device *adev)
|
|||
|
||||
def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
|
||||
data |= NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT;
|
||||
if (pci_is_thunderbolt_attached(adev->pdev))
|
||||
if (dev_is_removable(&adev->pdev->dev))
|
||||
data |= NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
|
||||
else
|
||||
data |= NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
|
||||
|
|
|
@ -268,7 +268,7 @@ static void event_interrupt_wq_v10(struct kfd_node *dev,
|
|||
SQ_INTERRUPT_WORD_WAVE_CTXID1, ENCODING);
|
||||
switch (encoding) {
|
||||
case SQ_INTERRUPT_WORD_ENCODING_AUTO:
|
||||
pr_debug(
|
||||
pr_debug_ratelimited(
|
||||
"sq_intr: auto, se %d, ttrace %d, wlt %d, ttrac_buf0_full %d, ttrac_buf1_full %d, ttrace_utc_err %d\n",
|
||||
REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_AUTO_CTXID1,
|
||||
SE_ID),
|
||||
|
@ -284,7 +284,7 @@ static void event_interrupt_wq_v10(struct kfd_node *dev,
|
|||
THREAD_TRACE_UTC_ERROR));
|
||||
break;
|
||||
case SQ_INTERRUPT_WORD_ENCODING_INST:
|
||||
pr_debug("sq_intr: inst, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n",
|
||||
pr_debug_ratelimited("sq_intr: inst, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n",
|
||||
REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
|
||||
SE_ID),
|
||||
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
|
||||
|
@ -310,7 +310,7 @@ static void event_interrupt_wq_v10(struct kfd_node *dev,
|
|||
case SQ_INTERRUPT_WORD_ENCODING_ERROR:
|
||||
sq_intr_err_type = REG_GET_FIELD(context_id0, KFD_CTXID0,
|
||||
ERR_TYPE);
|
||||
pr_warn("sq_intr: error, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d, err_type %d\n",
|
||||
pr_warn_ratelimited("sq_intr: error, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d, err_type %d\n",
|
||||
REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
|
||||
SE_ID),
|
||||
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
|
||||
|
|
|
@ -150,7 +150,7 @@ enum SQ_INTERRUPT_ERROR_TYPE {
|
|||
|
||||
static void print_sq_intr_info_auto(uint32_t context_id0, uint32_t context_id1)
|
||||
{
|
||||
pr_debug(
|
||||
pr_debug_ratelimited(
|
||||
"sq_intr: auto, ttrace %d, wlt %d, ttrace_buf_full %d, reg_tms %d, cmd_tms %d, host_cmd_ovf %d, host_reg_ovf %d, immed_ovf %d, ttrace_utc_err %d\n",
|
||||
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, THREAD_TRACE),
|
||||
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, WLT),
|
||||
|
@ -165,7 +165,7 @@ static void print_sq_intr_info_auto(uint32_t context_id0, uint32_t context_id1)
|
|||
|
||||
static void print_sq_intr_info_inst(uint32_t context_id0, uint32_t context_id1)
|
||||
{
|
||||
pr_debug(
|
||||
pr_debug_ratelimited(
|
||||
"sq_intr: inst, data 0x%08x, sh %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n",
|
||||
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, DATA),
|
||||
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, SH_ID),
|
||||
|
@ -177,7 +177,7 @@ static void print_sq_intr_info_inst(uint32_t context_id0, uint32_t context_id1)
|
|||
|
||||
static void print_sq_intr_info_error(uint32_t context_id0, uint32_t context_id1)
|
||||
{
|
||||
pr_warn(
|
||||
pr_warn_ratelimited(
|
||||
"sq_intr: error, detail 0x%08x, type %d, sh %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n",
|
||||
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, DETAIL),
|
||||
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, TYPE),
|
||||
|
|
|
@@ -333,7 +333,7 @@ static void event_interrupt_wq_v9(struct kfd_node *dev,
 		encoding = REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, ENCODING);
 		switch (encoding) {
 		case SQ_INTERRUPT_WORD_ENCODING_AUTO:
-			pr_debug(
+			pr_debug_ratelimited(
 				"sq_intr: auto, se %d, ttrace %d, wlt %d, ttrac_buf_full %d, reg_tms %d, cmd_tms %d, host_cmd_ovf %d, host_reg_ovf %d, immed_ovf %d, ttrace_utc_err %d\n",
 				REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, SE_ID),
 				REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, THREAD_TRACE),

@@ -347,7 +347,7 @@ static void event_interrupt_wq_v9(struct kfd_node *dev,
 				REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, THREAD_TRACE_UTC_ERROR));
 			break;
 		case SQ_INTERRUPT_WORD_ENCODING_INST:
-			pr_debug("sq_intr: inst, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, intr_data 0x%x\n",
+			pr_debug_ratelimited("sq_intr: inst, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, intr_data 0x%x\n",
 				REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SE_ID),
 				REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, DATA),
 				REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SH_ID),

@@ -366,7 +366,7 @@ static void event_interrupt_wq_v9(struct kfd_node *dev,
 			break;
 		case SQ_INTERRUPT_WORD_ENCODING_ERROR:
 			sq_intr_err = REG_GET_FIELD(sq_int_data, KFD_SQ_INT_DATA, ERR_TYPE);
-			pr_warn("sq_intr: error, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, err_type %d\n",
+			pr_warn_ratelimited("sq_intr: error, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, err_type %d\n",
 				REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SE_ID),
 				REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, DATA),
 				REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SH_ID),
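Note: the three hunks above swap pr_debug()/pr_warn() for pr_debug_ratelimited()/pr_warn_ratelimited() so an SQ interrupt storm cannot flood the kernel log. A minimal sketch of the same pattern in a hypothetical interrupt handler (my_irq_handler and "my_dev" are made-up names; the _ratelimited macros are the real <linux/printk.h> API):

    #include <linux/interrupt.h>
    #include <linux/printk.h>

    /* Hypothetical handler, not part of the patch above. */
    static irqreturn_t my_irq_handler(int irq, void *dev_id)
    {
    	/*
    	 * The _ratelimited variants drop messages beyond the default
    	 * rate limit (a burst of 10 per 5 seconds), so a misbehaving
    	 * device cannot flood the log from interrupt context.
    	 */
    	pr_warn_ratelimited("my_dev: spurious interrupt on irq %d\n", irq);
    	return IRQ_HANDLED;
    }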
@@ -617,8 +617,15 @@ create_bo_failed:

 void svm_range_vram_node_free(struct svm_range *prange)
 {
-	svm_range_bo_unref(prange->svm_bo);
-	prange->ttm_res = NULL;
+	/* serialize prange->svm_bo unref */
+	mutex_lock(&prange->lock);
+	/* prange->svm_bo has not been unref */
+	if (prange->ttm_res) {
+		prange->ttm_res = NULL;
+		mutex_unlock(&prange->lock);
+		svm_range_bo_unref(prange->svm_bo);
+	} else
+		mutex_unlock(&prange->lock);
 }

 struct kfd_node *
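Note: the rewritten svm_range_vram_node_free() claims the reference under prange->lock but drops the lock before the unref, which may sleep. A sketch of that test-and-clear-then-release shape, with hypothetical names (struct holder, res_put()):

    #include <linux/mutex.h>

    struct holder {			/* hypothetical stand-in for struct svm_range */
    	struct mutex lock;
    	void *res;			/* non-NULL while a reference is held */
    };

    void res_put(void *res);		/* hypothetical release; may sleep */

    static void holder_release(struct holder *h)
    {
    	void *res;

    	mutex_lock(&h->lock);		/* serialize concurrent releasers */
    	res = h->res;
    	h->res = NULL;			/* claim the reference under the lock */
    	mutex_unlock(&h->lock);

    	if (res)			/* release outside the lock */
    		res_put(res);
    }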
@@ -749,7 +756,7 @@ svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange,
 			prange->flags &= ~attrs[i].value;
 			break;
 		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
-			prange->granularity = attrs[i].value;
+			prange->granularity = min_t(uint32_t, attrs[i].value, 0x3F);
 			break;
 		default:
 			WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
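Note: min_t() casts both operands to the named type before comparing, which is why it is the usual way to clamp an untrusted ioctl value such as the granularity above. A small sketch with a hypothetical attribute struct and bound:

    #include <linux/minmax.h>
    #include <linux/types.h>

    #define MY_GRANULARITY_MAX 0x3F		/* hypothetical bound */

    struct my_attr {
    	u32 value;				/* user-supplied, untrusted */
    };

    static u32 clamp_granularity(const struct my_attr *attr)
    {
    	/* min_t() forces both sides to u32 before the comparison. */
    	return min_t(u32, attr->value, MY_GRANULARITY_MAX);
    }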
@@ -2077,7 +2077,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
 	struct dmub_srv_create_params create_params;
 	struct dmub_srv_region_params region_params;
 	struct dmub_srv_region_info region_info;
-	struct dmub_srv_fb_params fb_params;
+	struct dmub_srv_memory_params memory_params;
 	struct dmub_srv_fb_info *fb_info;
 	struct dmub_srv *dmub_srv;
 	const struct dmcub_firmware_header_v1_0 *hdr;

@@ -2177,6 +2177,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
 		adev->dm.dmub_fw->data +
 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
 		PSP_HEADER_BYTES;
+	region_params.is_mailbox_in_inbox = false;

 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
 					   &region_info);

@@ -2200,10 +2201,10 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
 		return r;

 	/* Rebase the regions on the framebuffer address. */
-	memset(&fb_params, 0, sizeof(fb_params));
-	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
-	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
-	fb_params.region_info = &region_info;
+	memset(&memory_params, 0, sizeof(memory_params));
+	memory_params.cpu_fb_addr = adev->dm.dmub_bo_cpu_addr;
+	memory_params.gpu_fb_addr = adev->dm.dmub_bo_gpu_addr;
+	memory_params.region_info = &region_info;

 	adev->dm.dmub_fb_info =
 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);

@@ -2215,7 +2216,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
 		return -ENOMEM;
 	}

-	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
+	status = dmub_srv_calc_mem_info(dmub_srv, &memory_params, fb_info);
 	if (status != DMUB_STATUS_OK) {
 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
 		return -EINVAL;

@@ -7394,6 +7395,9 @@ static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
 	int i;
 	int result = -EIO;

+	if (!ddc_service->ddc_pin || !ddc_service->ddc_pin->hw_info.hw_supported)
+		return result;
+
 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);

 	if (!cmd.payloads)
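Note: the added lines in amdgpu_dm_i2c_xfer() are a guard clause: validate the DDC pin before any allocation or hardware access and return the pre-set error code. The shape, with hypothetical types (struct ddc_pin, struct ddc_svc):

    #include <linux/errno.h>
    #include <linux/types.h>

    struct ddc_pin { bool hw_supported; };
    struct ddc_svc { struct ddc_pin *pin; };

    static int xfer(struct ddc_svc *svc)
    {
    	int result = -EIO;		/* default, returned on early exit */

    	/* Guard clause: never touch a missing or unsupported pin. */
    	if (!svc->pin || !svc->pin->hw_supported)
    		return result;

    	/* ... allocate payloads and perform the transfer ... */
    	return result;
    }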
@@ -9504,14 +9508,14 @@ static bool should_reset_plane(struct drm_atomic_state *state,
 	struct drm_plane *other;
 	struct drm_plane_state *old_other_state, *new_other_state;
 	struct drm_crtc_state *new_crtc_state;
+	struct amdgpu_device *adev = drm_to_adev(plane->dev);
 	int i;

 	/*
-	 * TODO: Remove this hack once the checks below are sufficient
-	 * enough to determine when we need to reset all the planes on
-	 * the stream.
+	 * TODO: Remove this hack for all asics once it proves that the
+	 * fast updates works fine on DCN3.2+.
 	 */
-	if (state->allow_modeset)
+	if (adev->ip_versions[DCE_HWIP][0] < IP_VERSION(3, 2, 0) && state->allow_modeset)
 		return true;

 	/* Exit early if we know that we're adding or removing the plane. */
@@ -1591,31 +1591,31 @@ enum dc_status dm_dp_mst_is_port_support_mode(
 	unsigned int upper_link_bw_in_kbps = 0, down_link_bw_in_kbps = 0;
 	unsigned int max_compressed_bw_in_kbps = 0;
 	struct dc_dsc_bw_range bw_range = {0};
-	struct drm_dp_mst_topology_mgr *mst_mgr;
+	uint16_t full_pbn = aconnector->mst_output_port->full_pbn;

 	/*
 	 * check if the mode could be supported if DSC pass-through is supported
 	 * AND check if there enough bandwidth available to support the mode
 	 * with DSC enabled.
+	 * Consider the case with the depth of the mst topology tree is equal or less than 2
+	 * A. When dsc bitstream can be transmitted along the entire path
+	 *    1. dsc is possible between source and branch/leaf device (common dsc params is possible), AND
+	 *    2. dsc passthrough supported at MST branch, or
+	 *    3. dsc decoding supported at leaf MST device
+	 *    Use maximum dsc compression as bw constraint
+	 * B. When dsc bitstream cannot be transmitted along the entire path
+	 *    Use native bw as bw constraint
 	 */
 	if (is_dsc_common_config_possible(stream, &bw_range) &&
-	    aconnector->mst_output_port->passthrough_aux) {
-		mst_mgr = aconnector->mst_output_port->mgr;
-		mutex_lock(&mst_mgr->lock);
+	    (aconnector->mst_output_port->passthrough_aux ||
+	     aconnector->dsc_aux == &aconnector->mst_output_port->aux)) {
 		cur_link_settings = stream->link->verified_link_cap;

 		upper_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
-							       &cur_link_settings
-							       );
-		down_link_bw_in_kbps = kbps_from_pbn(aconnector->mst_output_port->full_pbn);
+							       &cur_link_settings);
+		down_link_bw_in_kbps = kbps_from_pbn(full_pbn);

 		/* pick the bottleneck */
 		end_to_end_bw_in_kbps = min(upper_link_bw_in_kbps,
 					    down_link_bw_in_kbps);
-
-		mutex_unlock(&mst_mgr->lock);

 		/*
 		 * use the maximum dsc compression bandwidth as the required
 		 * bandwidth for the mode

@@ -1630,8 +1630,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(
 		/* check if mode could be supported within full_pbn */
 		bpp = convert_dc_color_depth_into_bpc(stream->timing.display_color_depth) * 3;
 		pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp, false);
-
-		if (pbn > aconnector->mst_output_port->full_pbn)
+		if (pbn > full_pbn)
 			return DC_FAIL_BANDWIDTH_VALIDATE;
 	}
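Note: the MST check treats end-to-end bandwidth as the minimum of the source-side link rate and the downstream rate derived from full_pbn, then asks whether the (possibly DSC-compressed) mode fits under that bottleneck. A self-contained illustration with made-up numbers; pbn_to_kbps() only loosely mirrors the driver's kbps_from_pbn():

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative only: rough PBN-to-kbps conversion for a DP MST link. */
    static uint32_t pbn_to_kbps(uint32_t pbn)
    {
    	return (uint32_t)(((uint64_t)pbn * 64000) / 54);
    }

    int main(void)
    {
    	uint32_t upper_kbps = 8100000;		/* made-up source link rate */
    	uint32_t down_kbps = pbn_to_kbps(2520);	/* made-up full_pbn */
    	uint32_t mode_kbps = 2100000;		/* made-up rate after DSC */

    	/* pick the bottleneck, as the driver does */
    	uint32_t end_to_end = upper_kbps < down_kbps ? upper_kbps : down_kbps;

    	printf("end-to-end %u kbps: mode %s\n", end_to_end,
    	       mode_kbps <= end_to_end ? "fits" : "does not fit");
    	return 0;
    }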
@@ -993,7 +993,8 @@ static bool dc_construct(struct dc *dc,
 	/* set i2c speed if not done by the respective dcnxxx__resource.c */
 	if (dc->caps.i2c_speed_in_khz_hdcp == 0)
 		dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz;
-
+	if (dc->caps.max_optimizable_video_width == 0)
+		dc->caps.max_optimizable_video_width = 5120;
 	dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
 	if (!dc->clk_mgr)
 		goto fail;

@@ -1070,53 +1071,6 @@ static void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context
 	}
 }

-static void phantom_pipe_blank(
-		struct dc *dc,
-		struct timing_generator *tg,
-		int width,
-		int height)
-{
-	struct dce_hwseq *hws = dc->hwseq;
-	enum dc_color_space color_space;
-	struct tg_color black_color = {0};
-	struct output_pixel_processor *opp = NULL;
-	uint32_t num_opps, opp_id_src0, opp_id_src1;
-	uint32_t otg_active_width, otg_active_height;
-	uint32_t i;
-
-	/* program opp dpg blank color */
-	color_space = COLOR_SPACE_SRGB;
-	color_space_to_black_color(dc, color_space, &black_color);
-
-	otg_active_width = width;
-	otg_active_height = height;
-
-	/* get the OPTC source */
-	tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
-	ASSERT(opp_id_src0 < dc->res_pool->res_cap->num_opp);
-
-	for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
-		if (dc->res_pool->opps[i] != NULL && dc->res_pool->opps[i]->inst == opp_id_src0) {
-			opp = dc->res_pool->opps[i];
-			break;
-		}
-	}
-
-	if (opp && opp->funcs->opp_set_disp_pattern_generator)
-		opp->funcs->opp_set_disp_pattern_generator(
-				opp,
-				CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR,
-				CONTROLLER_DP_COLOR_SPACE_UDEFINED,
-				COLOR_DEPTH_UNDEFINED,
-				&black_color,
-				otg_active_width,
-				otg_active_height,
-				0);
-
-	if (tg->funcs->is_tg_enabled(tg))
-		hws->funcs.wait_for_blank_complete(opp);
-}
-
 static void dc_update_viusal_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
 {
 	if (dc->ctx->dce_version >= DCN_VERSION_1_0) {

@@ -1207,7 +1161,8 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)

 				main_pipe_width = old_stream->mall_stream_config.paired_stream->dst.width;
 				main_pipe_height = old_stream->mall_stream_config.paired_stream->dst.height;
-				phantom_pipe_blank(dc, tg, main_pipe_width, main_pipe_height);
+				if (dc->hwss.blank_phantom)
+					dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height);
 				tg->funcs->enable_crtc(tg);
 			}
 		}
@@ -2476,6 +2431,7 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
 }

 static enum surface_update_type get_scaling_info_update_type(
+		const struct dc *dc,
 		const struct dc_surface_update *u)
 {
 	union surface_update_flags *update_flags = &u->surface->update_flags;

@@ -2510,6 +2466,12 @@ static enum surface_update_type get_scaling_info_update_type(
 		update_flags->bits.clock_change = 1;
 	}

+	if (u->scaling_info->src_rect.width > dc->caps.max_optimizable_video_width &&
+	    (u->scaling_info->clip_rect.width > u->surface->clip_rect.width ||
+	     u->scaling_info->clip_rect.height > u->surface->clip_rect.height))
+		/* Changing clip size of a large surface may result in MPC slice count change */
+		update_flags->bits.bandwidth_change = 1;
+
 	if (u->scaling_info->src_rect.x != u->surface->src_rect.x
 			|| u->scaling_info->src_rect.y != u->surface->src_rect.y
 			|| u->scaling_info->clip_rect.x != u->surface->clip_rect.x

@@ -2547,7 +2509,7 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
 	type = get_plane_info_update_type(u);
 	elevate_update_type(&overall_type, type);

-	type = get_scaling_info_update_type(u);
+	type = get_scaling_info_update_type(dc, u);
 	elevate_update_type(&overall_type, type);

 	if (u->flip_addr) {

@@ -4460,6 +4422,14 @@ bool dc_update_planes_and_stream(struct dc *dc,
 				update_type,
 				context);
 	} else {
+		if (!stream_update &&
+		    dc->hwss.is_pipe_topology_transition_seamless &&
+		    !dc->hwss.is_pipe_topology_transition_seamless(
+				dc, dc->current_state, context)) {
+
+			DC_LOG_ERROR("performing non-seamless pipe topology transition with surface only update!\n");
+			BREAK_TO_DEBUGGER();
+		}
 		commit_planes_for_stream(
 				dc,
 				srf_updates,
@@ -556,7 +556,7 @@ uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream)
 	for (i = 0; i < MAX_PIPES; i++) {
 		struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg;

-		if (res_ctx->pipe_ctx[i].stream != stream)
+		if (res_ctx->pipe_ctx[i].stream != stream || !tg)
 			continue;

 		return tg->funcs->get_frame_count(tg);

@@ -615,7 +615,7 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
 	for (i = 0; i < MAX_PIPES; i++) {
 		struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg;

-		if (res_ctx->pipe_ctx[i].stream != stream)
+		if (res_ctx->pipe_ctx[i].stream != stream || !tg)
 			continue;

 		tg->funcs->get_scanoutpos(tg,
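Note: both loops above gain a !tg test so a pipe without a bound timing generator is skipped rather than dereferenced. The same skip-on-NULL loop shape with hypothetical types:

    #include <stddef.h>

    #define MAX_SLOTS 6			/* hypothetical, in the spirit of MAX_PIPES */

    struct gen  { unsigned int (*frame_count)(void); };
    struct slot { int in_use; struct gen *gen; };

    static unsigned int first_frame_count(const struct slot *slots)
    {
    	for (int i = 0; i < MAX_SLOTS; i++) {
    		/* Skip slots that are unused or have no generator bound. */
    		if (!slots[i].in_use || !slots[i].gen)
    			continue;
    		return slots[i].gen->frame_count();
    	}
    	return 0;
    }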
@@ -229,6 +229,11 @@ struct dc_caps {
 	uint32_t dmdata_alloc_size;
 	unsigned int max_cursor_size;
 	unsigned int max_video_width;
+	/*
+	 * max video plane width that can be safely assumed to be always
+	 * supported by single DPP pipe.
+	 */
+	unsigned int max_optimizable_video_width;
 	unsigned int min_horizontal_blanking_period;
 	int linear_pitch_alignment;
 	bool dcc_const_color;