Add Renesas RZ/G3L RSPI support
Biju <biju.das.au@gmail.com> says: This patch series adds binding and driver support for the RSPI IP found on the RZ/G3L SoC. The RSPI is compatible with the RZ/V2H RSPI, but has 2 clocks compared to 3 on the RZ/V2H.
Link: https://patch.msgid.link/20260408085418.18770-1-biju.das.jz@bp.renesas.com
This commit is contained in:
commit 2c9e7a5f2e

.mailmap

@@ -1,6 +1,7 @@
Alan Cox <alan@lxorguk.ukuu.org.uk>
Alan Cox <root@hraefn.swansea.linux.org.uk>
Alyssa Rosenzweig <alyssa@rosenzweig.io>
Askar Safin <safinaskar@gmail.com>
Christoph Hellwig <hch@lst.de>
Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Marc Gonzalez <marc.w.gonzalez@free.fr>

@@ -316,6 +316,7 @@ Hans Verkuil <hverkuil@kernel.org> <hverkuil-cisco@xs4all.nl>
Hans Verkuil <hverkuil@kernel.org> <hansverk@cisco.com>
Hao Ge <hao.ge@linux.dev> <gehao@kylinos.cn>
Harry Yoo <harry.yoo@oracle.com> <42.hyeyoo@gmail.com>
Harry Yoo <harry@kernel.org> <harry.yoo@oracle.com>
Heiko Carstens <hca@linux.ibm.com> <h.carstens@de.ibm.com>
Heiko Carstens <hca@linux.ibm.com> <heiko.carstens@de.ibm.com>
Heiko Stuebner <heiko@sntech.de> <heiko.stuebner@bqreaders.com>

@@ -587,6 +588,7 @@ Morten Welinder <terra@gnome.org>
Morten Welinder <welinder@anemone.rentec.com>
Morten Welinder <welinder@darter.rentec.com>
Morten Welinder <welinder@troll.com>
Muhammad Usama Anjum <usama.anjum@arm.com> <usama.anjum@collabora.com>
Mukesh Ojha <quic_mojha@quicinc.com> <mojha@codeaurora.org>
Muna Sinada <quic_msinada@quicinc.com> <msinada@codeaurora.org>
Murali Nalajala <quic_mnalajal@quicinc.com> <mnalajal@codeaurora.org>

@@ -85,6 +85,16 @@ In the example, 'Requester ID' means the ID of the device that sent
the error message to the Root Port. Please refer to PCIe specs for other
fields.

The 'TLP Header' is the prefix/header of the TLP that caused the error
in raw hex format. To decode the TLP Header into human-readable form
one may use tlp-tool:

  https://github.com/mmpg-x86/tlp-tool

Example usage::

  curl -L https://git.kernel.org/linus/2ca1c94ce0b6 | tlp-tool --aer

AER Ratelimits
--------------

@@ -149,11 +149,33 @@ For architectures that require cache flushing for DMA coherence
DMA_ATTR_MMIO will not perform any cache flushing. The address
provided must never be mapped cacheable into the CPU.

DMA_ATTR_CPU_CACHE_CLEAN
------------------------
DMA_ATTR_DEBUGGING_IGNORE_CACHELINES
------------------------------------

This attribute indicates the CPU will not dirty any cacheline overlapping this
DMA_FROM_DEVICE/DMA_BIDIRECTIONAL buffer while it is mapped. This allows
multiple small buffers to safely share a cacheline without risk of data
corruption, suppressing DMA debug warnings about overlapping mappings.
All mappings sharing a cacheline should have this attribute.
This attribute indicates that CPU cache lines may overlap for buffers mapped
with DMA_FROM_DEVICE or DMA_BIDIRECTIONAL.

Such overlap may occur when callers map multiple small buffers that reside
within the same cache line. In this case, callers must guarantee that the CPU
will not dirty these cache lines after the mappings are established. When this
condition is met, multiple buffers can safely share a cache line without risking
data corruption.

All mappings that share a cache line must set this attribute to suppress DMA
debug warnings about overlapping mappings.
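
To make the intended use of DMA_ATTR_DEBUGGING_IGNORE_CACHELINES concrete, here
is a minimal, hypothetical driver sketch (not part of this patch; the structure
and function names are illustrative): two sub-cache-line receive buffers from
the same structure are mapped with the attribute, and the driver promises not
to write to either buffer while the mappings are live::

  #include <linux/dma-mapping.h>

  /* Two small RX buffers that may end up sharing a cache line. */
  struct rx_bufs {
          u8 a[32];
          u8 b[32];
  };

  static int map_rx_bufs(struct device *dev, struct rx_bufs *rx,
                         dma_addr_t *da, dma_addr_t *db)
  {
          unsigned long attrs = DMA_ATTR_DEBUGGING_IGNORE_CACHELINES;

          /* Every mapping that shares the cache line carries the attribute. */
          *da = dma_map_single_attrs(dev, rx->a, sizeof(rx->a),
                                     DMA_FROM_DEVICE, attrs);
          if (dma_mapping_error(dev, *da))
                  return -ENOMEM;

          *db = dma_map_single_attrs(dev, rx->b, sizeof(rx->b),
                                     DMA_FROM_DEVICE, attrs);
          if (dma_mapping_error(dev, *db)) {
                  dma_unmap_single_attrs(dev, *da, sizeof(rx->a),
                                         DMA_FROM_DEVICE, attrs);
                  return -ENOMEM;
          }
          return 0;
  }

The same attribute would also be passed to the matching
dma_unmap_single_attrs() calls when the buffers are torn down.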

DMA_ATTR_REQUIRE_COHERENT
-------------------------

DMA mapping requests with the DMA_ATTR_REQUIRE_COHERENT attribute fail on any
system where SWIOTLB or cache management is required. This should only
be used to support uAPI designs that require continuous HW DMA
coherence with userspace processes, for example RDMA and DRM. At a
minimum the memory being mapped must be userspace memory from
pin_user_pages() or similar.

Drivers should consider using dma_mmap_pages() instead of this
interface when building their uAPIs, when possible.

It must never be used in an in-kernel driver that only works with
kernel memory.
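
A rough illustration of that usage pattern, assuming the attribute described
above and using only long-standing kernel APIs (pin_user_pages_fast() and
dma_map_page_attrs()); this is a sketch under those assumptions, not the
canonical RDMA/DRM code path::

  #include <linux/mm.h>
  #include <linux/dma-mapping.h>

  /* Pin one page of user memory and require a coherent mapping for it. */
  static dma_addr_t map_user_page_coherent(struct device *dev,
                                           unsigned long uaddr,
                                           struct page **page)
  {
          dma_addr_t dma;

          if (pin_user_pages_fast(uaddr & PAGE_MASK, 1,
                                  FOLL_WRITE | FOLL_LONGTERM, page) != 1)
                  return DMA_MAPPING_ERROR;

          dma = dma_map_page_attrs(dev, *page, 0, PAGE_SIZE,
                                   DMA_BIDIRECTIONAL,
                                   DMA_ATTR_REQUIRE_COHERENT);
          if (dma_mapping_error(dev, dma)) {
                  /* e.g. SWIOTLB would be needed: the request is refused. */
                  unpin_user_page(*page);
                  return DMA_MAPPING_ERROR;
          }
          return dma;
  }

If the mapping is refused because coherence cannot be provided, the driver is
expected to report the error to userspace rather than silently degrade.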
@ -66,7 +66,7 @@ then:
|
|||
required:
|
||||
- refresh-rate-hz
|
||||
|
||||
additionalProperties: false
|
||||
unevaluatedProperties: false
|
||||
|
||||
examples:
|
||||
- |
|
||||
|
|
|
|||
|
|
@ -301,6 +301,7 @@ properties:
|
|||
maxItems: 4
|
||||
|
||||
dependencies:
|
||||
pd-disable: [typec-power-opmode]
|
||||
sink-vdos-v1: [ sink-vdos ]
|
||||
sink-vdos: [ sink-vdos-v1 ]
|
||||
|
||||
|
|
|
|||
|
|
@ -37,7 +37,7 @@ properties:
|
|||
const: 2
|
||||
|
||||
"#interrupt-cells":
|
||||
const: 1
|
||||
const: 2
|
||||
|
||||
ngpios:
|
||||
description:
|
||||
|
|
@ -86,7 +86,7 @@ examples:
|
|||
gpio-controller;
|
||||
#gpio-cells = <2>;
|
||||
interrupt-controller;
|
||||
#interrupt-cells = <1>;
|
||||
#interrupt-cells = <2>;
|
||||
interrupts = <53>, <53>, <53>, <53>,
|
||||
<53>, <53>, <53>, <53>,
|
||||
<53>, <53>, <53>, <53>,
|
||||
|
|
|
|||
|
|
@ -33,6 +33,7 @@ properties:
|
|||
- const: rockchip,rk3066-spdif
|
||||
- items:
|
||||
- enum:
|
||||
- rockchip,rk3576-spdif
|
||||
- rockchip,rk3588-spdif
|
||||
- const: rockchip,rk3568-spdif
|
||||
|
||||
|
|
|
|||
|
|
@ -164,7 +164,7 @@ allOf:
|
|||
properties:
|
||||
compatible:
|
||||
contains:
|
||||
const: st,stm32mph7-sai
|
||||
const: st,stm32h7-sai
|
||||
then:
|
||||
properties:
|
||||
clocks:
|
||||
|
|
|
|||
|
|
@@ -13,6 +13,7 @@ properties:
  compatible:
    oneOf:
      - enum:
          - renesas,r9a08g046-rspi # RZ/G3L
          - renesas,r9a09g057-rspi # RZ/V2H(P)
          - renesas,r9a09g077-rspi # RZ/T2H
      - items:

@@ -90,6 +91,34 @@ required:

allOf:
  - $ref: spi-controller.yaml#
  - if:
      properties:
        compatible:
          contains:
            enum:
              - renesas,r9a08g046-rspi
    then:
      properties:
        clocks:
          maxItems: 2

        clock-names:
          items:
            - const: pclk
            - const: tclk

        dmas:
          maxItems: 2

        dma-names:
          items:
            - const: rx
            - const: tx

      required:
        - resets
        - reset-names

  - if:
      properties:
        compatible:

@@ -783,6 +783,56 @@ controlled by the "uuid" mount option, which supports these values:
mounted with "uuid=on".


Durability and copy up
----------------------

The fsync(2) system call ensures that the data and metadata of a file
are safely written to the backing storage, which is expected to
guarantee that the information still exists after a system crash.

Without an fsync(2) call, there is no guarantee that the observed
data after a system crash will be either the old or the new data, but
in practice, the observed data after a crash is often the old or new data
or a mix of both.

When an overlayfs file is modified for the first time, copy up will
create a copy of the lower file and its parent directories in the upper
layer. Since the Linux filesystem API does not enforce any particular
ordering on storing changes without explicit fsync(2) calls, in case
of a system crash, the upper file could end up with no data at all
(i.e. zeros), which would be an unexpected outcome. To avoid this,
overlayfs calls fsync(2) on the upper file before completing
data copy up with rename(2) or link(2) to make the copy up "atomic".

By default, overlayfs does not explicitly call fsync(2) on copied up
directories or on metadata-only copy up, so it provides no guarantee to
persist the user's modification unless the user calls fsync(2).
The fsync during copy up only guarantees that if a copy up is observed
after a crash, the observed data is not zeroes or intermediate values
from the copy up staging area.

On traditional local filesystems with a single journal (e.g. ext4, xfs),
fsync on a file also persists the parent directory changes, because they
are usually modified in the same transaction, so metadata durability during
data copy up effectively comes for free. Overlayfs further limits risk by
disallowing network filesystems as the upper layer.

Overlayfs can be tuned to prefer performance or durability when storing
to the underlying upper layer. This is controlled by the "fsync" mount
option, which supports these values:

- "auto": (default)
  Call fsync(2) on the upper file before completion of data copy up.
  No explicit fsync(2) on directory or metadata-only copy up.
- "strict":
  Call fsync(2) on the upper file and directories before completion of any
  copy up.
- "volatile": [*]
  Prefer performance over durability (see `Volatile mount`_)

[*] The mount option "volatile" is an alias for "fsync=volatile".


Volatile mount
--------------

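As a concrete illustration of the new option, a minimal userspace sketch that
mounts an overlay with the stricter durability policy; the directory paths are
placeholders, and "fsync=strict" is the option value described above::

  #include <sys/mount.h>
  #include <stdio.h>

  int main(void)
  {
          /* lower/upper/work/merged paths are placeholders. */
          const char *opts =
                  "lowerdir=/lower,upperdir=/upper,workdir=/work,fsync=strict";

          if (mount("overlay", "/merged", "overlay", 0, opts)) {
                  perror("mount overlay");
                  return 1;
          }
          return 0;
  }

The equivalent mount(8) invocation would simply pass the same option string
with -o.
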
@@ -27,10 +27,10 @@ for details.
Sysfs entries
-------------

The following attributes are supported. Current maxim attribute
The following attributes are supported. Current maximum attribute
is read-write, all other attributes are read-only.

in0_input        Measured voltage in microvolts.
in0_input        Measured voltage in millivolts.

curr1_input      Measured current in microamperes.
curr1_max_alarm  Overcurrent alarm in microamperes.
curr1_input      Measured current in milliamperes.
curr1_max        Overcurrent shutdown threshold in milliamperes.

@@ -51,8 +51,9 @@ temp1_max Provides thermal control temperature of the CPU package
temp1_crit       Provides shutdown temperature of the CPU package which
                 is also known as the maximum processor junction
                 temperature, Tjmax or Tprochot.
temp1_crit_hyst  Provides the hysteresis value from Tcontrol to Tjmax of
                 the CPU package.
temp1_crit_hyst  Provides the hysteresis temperature of the CPU
                 package. Returns Tcontrol, the temperature at which
                 the critical condition clears.

temp2_label      "DTS"
temp2_input      Provides current temperature of the CPU package scaled

@@ -62,8 +63,9 @@ temp2_max Provides thermal control temperature of the CPU package
temp2_crit       Provides shutdown temperature of the CPU package which
                 is also known as the maximum processor junction
                 temperature, Tjmax or Tprochot.
temp2_crit_hyst  Provides the hysteresis value from Tcontrol to Tjmax of
                 the CPU package.
temp2_crit_hyst  Provides the hysteresis temperature of the CPU
                 package. Returns Tcontrol, the temperature at which
                 the critical condition clears.

temp3_label      "Tcontrol"
temp3_input      Provides current Tcontrol temperature of the CPU

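For readers unfamiliar with the hwmon sysfs ABI these attributes come from, a
tiny userspace sketch that reads the package hysteresis value; the hwmon
instance index is illustrative, and the millidegree-Celsius scaling is the
generic hwmon convention rather than something stated in the text above::

  #include <stdio.h>

  int main(void)
  {
          long mdeg;
          /* hwmon0 is a placeholder; match the device by its name attribute. */
          FILE *f = fopen("/sys/class/hwmon/hwmon0/temp1_crit_hyst", "r");

          if (!f)
                  return 1;
          if (fscanf(f, "%ld", &mdeg) != 1) {
                  fclose(f);
                  return 1;
          }
          fclose(f);
          printf("temp1_crit_hyst = %ld millidegrees Celsius\n", mdeg);
          return 0;
  }
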
@ -5,8 +5,138 @@ Security bugs
|
|||
|
||||
Linux kernel developers take security very seriously. As such, we'd
|
||||
like to know when a security bug is found so that it can be fixed and
|
||||
disclosed as quickly as possible. Please report security bugs to the
|
||||
Linux kernel security team.
|
||||
disclosed as quickly as possible.
|
||||
|
||||
Preparing your report
|
||||
---------------------
|
||||
|
||||
Like with any bug report, a security bug report requires a lot of analysis work
|
||||
from the developers, so the more information you can share about the issue, the
|
||||
better. Please review the procedure outlined in
|
||||
Documentation/admin-guide/reporting-issues.rst if you are unclear about what
|
||||
information is helpful. The following information is absolutely necessary in
|
||||
**any** security bug report:
|
||||
|
||||
* **affected kernel version range**: with no version indication, your report
|
||||
will not be processed. A significant part of reports are for bugs that
|
||||
have already been fixed, so it is extremely important that vulnerabilities
|
||||
are verified on recent versions (development tree or latest stable
|
||||
version), at least by verifying that the code has not changed since the
|
||||
version where it was detected.
|
||||
|
||||
* **description of the problem**: a detailed description of the problem, with
|
||||
traces showing its manifestation, and why you consider the observed
behavior a problem in the kernel, is necessary.
|
||||
|
||||
* **reproducer**: developers will need to be able to reproduce the problem to
|
||||
consider a fix as effective. This includes both a way to trigger the issue
|
||||
and a way to confirm it happens. A reproducer with low complexity
|
||||
dependencies will be needed (source code, shell script, sequence of
|
||||
instructions, file-system image etc). Binary-only executables are not
|
||||
accepted. Working exploits are extremely helpful and will not be released
|
||||
without consent from the reporter, unless they are already public. By
|
||||
definition if an issue cannot be reproduced, it is not exploitable, thus it
|
||||
is not a security bug.
|
||||
|
||||
* **conditions**: if the bug depends on certain configuration options,
|
||||
sysctls, permissions, timing, code modifications etc, these should be
|
||||
indicated.
|
||||
|
||||
In addition, the following information is highly desirable:
|
||||
|
||||
* **suspected location of the bug**: the file names and functions where the
|
||||
bug is suspected to be present are very important, at least to help forward
|
||||
the report to the appropriate maintainers. When not possible (for example,
|
||||
"system freezes each time I run this command"), the security team will help
|
||||
identify the source of the bug.
|
||||
|
||||
* **a proposed fix**: bug reporters who have analyzed the cause of a bug in
|
||||
the source code almost always have an accurate idea on how to fix it,
|
||||
because they spent a long time studying it and its implications. Proposing
|
||||
a tested fix will save maintainers a lot of time, even if the fix ends up
|
||||
not being the right one, because it helps understand the bug. When
|
||||
proposing a tested fix, please always format it in a way that can be
|
||||
immediately merged (see Documentation/process/submitting-patches.rst).
|
||||
This will save some back-and-forth exchanges if it is accepted, and you
|
||||
will be credited for finding and fixing this issue. Note that in this case
|
||||
only a ``Signed-off-by:`` tag is needed, without ``Reported-by:`` when the
|
||||
reporter and author are the same.
|
||||
|
||||
* **mitigations**: very often during a bug analysis, some ways of mitigating
|
||||
the issue appear. It is useful to share them, as they can be helpful to
|
||||
keep end users protected during the time it takes them to apply the fix.
|
||||
|
||||
Identifying contacts
|
||||
--------------------
|
||||
|
||||
The most effective way to report a security bug is to send it directly to the
|
||||
affected subsystem's maintainers and Cc: the Linux kernel security team. Do
|
||||
not send it to a public list at this stage, unless you have good reasons to
|
||||
consider the issue as being public or trivial to discover (e.g. result of a
|
||||
widely available automated vulnerability scanning tool that can be repeated by
|
||||
anyone).
|
||||
|
||||
If you're sending a report for issues affecting multiple parts in the kernel,
|
||||
even if they're fairly similar issues, please send individual messages (think
|
||||
that maintainers will not all work on the issues at the same time). The only
|
||||
exception is when an issue concerns closely related parts maintained by the
|
||||
exact same subset of maintainers, and these parts are expected to be fixed all
|
||||
at once by the same commit, then it may be acceptable to report them at once.
|
||||
|
||||
One difficulty for most first-time reporters is figuring out the right list of
|
||||
recipients to send a report to. In the Linux kernel, all official maintainers
|
||||
are trusted, so the consequences of accidentally including the wrong maintainer
|
||||
are essentially a bit more noise for that person, i.e. nothing dramatic. As
|
||||
such, a suitable method to figure the list of maintainers (which kernel
|
||||
security officers use) is to rely on the get_maintainer.pl script, tuned to
|
||||
only report maintainers. This script, when passed a file name, will look for
|
||||
its path in the MAINTAINERS file to figure a hierarchical list of relevant
|
||||
maintainers. Calling it a first time with the finest level of filtering will
|
||||
most of the time return a short list of this specific file's maintainers::
|
||||
|
||||
$ ./scripts/get_maintainer.pl --no-l --no-r --pattern-depth 1 \
|
||||
drivers/example.c
|
||||
Developer One <dev1@example.com> (maintainer:example driver)
|
||||
Developer Two <dev2@example.org> (maintainer:example driver)
|
||||
|
||||
These two maintainers should then receive the message. If the command does not
|
||||
return anything, it means the affected file is part of a wider subsystem, so we
|
||||
should be less specific::
|
||||
|
||||
$ ./scripts/get_maintainer.pl --no-l --no-r drivers/example.c
|
||||
Developer One <dev1@example.com> (maintainer:example subsystem)
|
||||
Developer Two <dev2@example.org> (maintainer:example subsystem)
|
||||
Developer Three <dev3@example.com> (maintainer:example subsystem [GENERAL])
|
||||
Developer Four <dev4@example.org> (maintainer:example subsystem [GENERAL])
|
||||
|
||||
Here, picking the first, most specific ones, is sufficient. When the list is
|
||||
long, it is possible to produce a comma-delimited e-mail address list on a
|
||||
single line suitable for use in the To: field of a mailer like this::
|
||||
|
||||
$ ./scripts/get_maintainer.pl --no-tree --no-l --no-r --no-n --m \
|
||||
--no-git-fallback --no-substatus --no-rolestats --no-multiline \
|
||||
--pattern-depth 1 drivers/example.c
|
||||
dev1@example.com, dev2@example.org
|
||||
|
||||
or this for the wider list::
|
||||
|
||||
$ ./scripts/get_maintainer.pl --no-tree --no-l --no-r --no-n --m \
|
||||
--no-git-fallback --no-substatus --no-rolestats --no-multiline \
|
||||
drivers/example.c
|
||||
dev1@example.com, dev2@example.org, dev3@example.com, dev4@example.org
|
||||
|
||||
If at this point you're still facing difficulties spotting the right
|
||||
maintainers, **and only in this case**, it's possible to send your report to
|
||||
the Linux kernel security team only. Your message will be triaged, and you
|
||||
will receive instructions about whom to contact, if needed. Your message may
|
||||
equally be forwarded as-is to the relevant maintainers.
|
||||
|
||||
Sending the report
|
||||
------------------
|
||||
|
||||
Reports are to be sent over e-mail exclusively. Please use a working e-mail
|
||||
address, preferably the same that you want to appear in ``Reported-by`` tags
|
||||
if any. If unsure, send your report to yourself first.
|
||||
|
||||
The security team and maintainers almost always require additional
|
||||
information beyond what was initially provided in a report and rely on
|
||||
|
|
@ -18,20 +148,12 @@ run additional tests. Reports where the reporter does not respond promptly
|
|||
or cannot effectively discuss their findings may be abandoned if the
|
||||
communication does not quickly improve.
|
||||
|
||||
As it is with any bug, the more information provided the easier it
|
||||
will be to diagnose and fix. Please review the procedure outlined in
|
||||
'Documentation/admin-guide/reporting-issues.rst' if you are unclear about what
|
||||
information is helpful. Any exploit code is very helpful and will not
|
||||
be released without consent from the reporter unless it has already been
|
||||
made public.
|
||||
|
||||
The report must be sent to maintainers, with the security team in ``Cc:``.
|
||||
The Linux kernel security team can be contacted by email at
|
||||
<security@kernel.org>. This is a private list of security officers
|
||||
who will help verify the bug report and develop and release a fix.
|
||||
If you already have a fix, please include it with your report, as
|
||||
that can speed up the process considerably. It is possible that the
|
||||
security team will bring in extra help from area maintainers to
|
||||
understand and fix the security vulnerability.
|
||||
who will help verify the bug report and assist developers working on a fix.
|
||||
It is possible that the security team will bring in extra help from area
|
||||
maintainers to understand and fix the security vulnerability.
|
||||
|
||||
Please send **plain text** emails without attachments where possible.
|
||||
It is much harder to have a context-quoted discussion about a complex
|
||||
|
|
@ -42,7 +164,9 @@ reproduction steps, and follow it with a proposed fix, all in plain text.
|
|||
Markdown, HTML and RST formatted reports are particularly frowned upon since
|
||||
they're quite hard to read for humans and encourage to use dedicated viewers,
|
||||
sometimes online, which by definition is not acceptable for a confidential
|
||||
security report.
|
||||
security report. Note that some mailers tend to mangle the formatting of plain
text by default; please consult Documentation/process/email-clients.rst for
|
||||
more info.
|
||||
|
||||
Disclosure and embargoed information
|
||||
------------------------------------
|
||||
|
|
|
|||
|
|
@ -8,7 +8,7 @@ Landlock: unprivileged access control
|
|||
=====================================
|
||||
|
||||
:Author: Mickaël Salaün
|
||||
:Date: January 2026
|
||||
:Date: March 2026
|
||||
|
||||
The goal of Landlock is to enable restriction of ambient rights (e.g. global
|
||||
filesystem or network access) for a set of processes. Because Landlock
|
||||
|
|
@ -197,12 +197,27 @@ similar backwards compatibility check is needed for the restrict flags
|
|||
|
||||
.. code-block:: c
|
||||
|
||||
__u32 restrict_flags = LANDLOCK_RESTRICT_SELF_LOG_NEW_EXEC_ON;
|
||||
if (abi < 7) {
|
||||
/* Clear logging flags unsupported before ABI 7. */
|
||||
__u32 restrict_flags =
|
||||
LANDLOCK_RESTRICT_SELF_LOG_NEW_EXEC_ON |
|
||||
LANDLOCK_RESTRICT_SELF_TSYNC;
|
||||
switch (abi) {
|
||||
case 1 ... 6:
|
||||
/* Removes logging flags for ABI < 7 */
|
||||
restrict_flags &= ~(LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF |
|
||||
LANDLOCK_RESTRICT_SELF_LOG_NEW_EXEC_ON |
|
||||
LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF);
|
||||
__attribute__((fallthrough));
|
||||
case 7:
|
||||
/*
|
||||
* Removes multithreaded enforcement flag for ABI < 8
|
||||
*
|
||||
* WARNING: Without this flag, calling landlock_restrict_self(2) is
|
||||
* only equivalent if the calling process is single-threaded. Below
|
||||
* ABI v8 (and as of ABI v8, when not using this flag), a Landlock
|
||||
* policy would only be enforced for the calling thread and its
|
||||
* children (and not for all threads, including parents and siblings).
|
||||
*/
|
||||
restrict_flags &= ~LANDLOCK_RESTRICT_SELF_TSYNC;
|
||||
}
|
||||
|
||||
The next step is to restrict the current thread from gaining more privileges
|
||||
|
|
|
|||
23
MAINTAINERS
23
MAINTAINERS
|
|
@ -3986,7 +3986,7 @@ F: drivers/hwmon/asus-ec-sensors.c
|
|||
ASUS NOTEBOOKS AND EEEPC ACPI/WMI EXTRAS DRIVERS
|
||||
M: Corentin Chary <corentin.chary@gmail.com>
|
||||
M: Luke D. Jones <luke@ljones.dev>
|
||||
M: Denis Benato <benato.denis96@gmail.com>
|
||||
M: Denis Benato <denis.benato@linux.dev>
|
||||
L: platform-driver-x86@vger.kernel.org
|
||||
S: Maintained
|
||||
W: https://asus-linux.org/
|
||||
|
|
@ -8628,8 +8628,14 @@ F: drivers/gpu/drm/lima/
|
|||
F: include/uapi/drm/lima_drm.h
|
||||
|
||||
DRM DRIVERS FOR LOONGSON
|
||||
M: Jianmin Lv <lvjianmin@loongson.cn>
|
||||
M: Qianhai Wu <wuqianhai@loongson.cn>
|
||||
R: Huacai Chen <chenhuacai@kernel.org>
|
||||
R: Mingcong Bai <jeffbai@aosc.io>
|
||||
R: Xi Ruoyao <xry111@xry111.site>
|
||||
R: Icenowy Zheng <zhengxingda@iscas.ac.cn>
|
||||
L: dri-devel@lists.freedesktop.org
|
||||
S: Orphan
|
||||
S: Maintained
|
||||
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
|
||||
F: drivers/gpu/drm/loongson/
|
||||
|
||||
|
|
@ -9613,7 +9619,12 @@ F: include/linux/ext2*
|
|||
|
||||
EXT4 FILE SYSTEM
|
||||
M: "Theodore Ts'o" <tytso@mit.edu>
|
||||
M: Andreas Dilger <adilger.kernel@dilger.ca>
|
||||
R: Andreas Dilger <adilger.kernel@dilger.ca>
|
||||
R: Baokun Li <libaokun@linux.alibaba.com>
|
||||
R: Jan Kara <jack@suse.cz>
|
||||
R: Ojaswin Mujoo <ojaswin@linux.ibm.com>
|
||||
R: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
|
||||
R: Zhang Yi <yi.zhang@huawei.com>
|
||||
L: linux-ext4@vger.kernel.org
|
||||
S: Maintained
|
||||
W: http://ext4.wiki.kernel.org
|
||||
|
|
@ -12009,7 +12020,6 @@ I2C SUBSYSTEM
|
|||
M: Wolfram Sang <wsa+renesas@sang-engineering.com>
|
||||
L: linux-i2c@vger.kernel.org
|
||||
S: Maintained
|
||||
W: https://i2c.wiki.kernel.org/
|
||||
Q: https://patchwork.ozlabs.org/project/linux-i2c/list/
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/wsa/linux.git
|
||||
F: Documentation/i2c/
|
||||
|
|
@ -12035,7 +12045,6 @@ I2C SUBSYSTEM HOST DRIVERS
|
|||
M: Andi Shyti <andi.shyti@kernel.org>
|
||||
L: linux-i2c@vger.kernel.org
|
||||
S: Maintained
|
||||
W: https://i2c.wiki.kernel.org/
|
||||
Q: https://patchwork.ozlabs.org/project/linux-i2c/list/
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/andi.shyti/linux.git
|
||||
F: Documentation/devicetree/bindings/i2c/
|
||||
|
|
@ -16877,7 +16886,7 @@ M: Lorenzo Stoakes <ljs@kernel.org>
|
|||
R: Rik van Riel <riel@surriel.com>
|
||||
R: Liam R. Howlett <Liam.Howlett@oracle.com>
|
||||
R: Vlastimil Babka <vbabka@kernel.org>
|
||||
R: Harry Yoo <harry.yoo@oracle.com>
|
||||
R: Harry Yoo <harry@kernel.org>
|
||||
R: Jann Horn <jannh@google.com>
|
||||
L: linux-mm@kvack.org
|
||||
S: Maintained
|
||||
|
|
@ -24343,7 +24352,7 @@ F: drivers/nvmem/layouts/sl28vpd.c
|
|||
|
||||
SLAB ALLOCATOR
|
||||
M: Vlastimil Babka <vbabka@kernel.org>
|
||||
M: Harry Yoo <harry.yoo@oracle.com>
|
||||
M: Harry Yoo <harry@kernel.org>
|
||||
M: Andrew Morton <akpm@linux-foundation.org>
|
||||
R: Hao Li <hao.li@linux.dev>
|
||||
R: Christoph Lameter <cl@gentwo.org>
|
||||
|
|
|
|||
4
Makefile
4
Makefile
|
|
@ -2,7 +2,7 @@
|
|||
VERSION = 7
|
||||
PATCHLEVEL = 0
|
||||
SUBLEVEL = 0
|
||||
EXTRAVERSION = -rc5
|
||||
EXTRAVERSION = -rc7
|
||||
NAME = Baby Opossum Posse
|
||||
|
||||
# *DOCUMENTATION*
|
||||
|
|
@ -1654,7 +1654,7 @@ CLEAN_FILES += vmlinux.symvers modules-only.symvers \
|
|||
modules.builtin.ranges vmlinux.o.map vmlinux.unstripped \
|
||||
compile_commands.json rust/test \
|
||||
rust-project.json .vmlinux.objs .vmlinux.export.c \
|
||||
.builtin-dtbs-list .builtin-dtb.S
|
||||
.builtin-dtbs-list .builtin-dtbs.S
|
||||
|
||||
# Directories & files removed with 'make mrproper'
|
||||
MRPROPER_FILES += include/config include/generated \
|
||||
|
|
|
|||
|
|
@ -252,6 +252,7 @@ config ARM64
|
|||
select HAVE_RSEQ
|
||||
select HAVE_RUST if RUSTC_SUPPORTS_ARM64
|
||||
select HAVE_STACKPROTECTOR
|
||||
select HAVE_STATIC_CALL if CFI
|
||||
select HAVE_SYSCALL_TRACEPOINTS
|
||||
select HAVE_KPROBES
|
||||
select HAVE_KRETPROBES
|
||||
|
|
|
|||
31
arch/arm64/include/asm/static_call.h
Normal file
31
arch/arm64/include/asm/static_call.h
Normal file
|
|
@ -0,0 +1,31 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
#ifndef _ASM_STATIC_CALL_H
|
||||
#define _ASM_STATIC_CALL_H
|
||||
|
||||
#define __ARCH_DEFINE_STATIC_CALL_TRAMP(name, target) \
|
||||
asm(" .pushsection .static_call.text, \"ax\" \n" \
|
||||
" .align 4 \n" \
|
||||
" .globl " name " \n" \
|
||||
name ": \n" \
|
||||
" hint 34 /* BTI C */ \n" \
|
||||
" adrp x16, 1f \n" \
|
||||
" ldr x16, [x16, :lo12:1f] \n" \
|
||||
" br x16 \n" \
|
||||
" .type " name ", %function \n" \
|
||||
" .size " name ", . - " name " \n" \
|
||||
" .popsection \n" \
|
||||
" .pushsection .rodata, \"a\" \n" \
|
||||
" .align 3 \n" \
|
||||
"1: .quad " target " \n" \
|
||||
" .popsection \n")
|
||||
|
||||
#define ARCH_DEFINE_STATIC_CALL_TRAMP(name, func) \
|
||||
__ARCH_DEFINE_STATIC_CALL_TRAMP(STATIC_CALL_TRAMP_STR(name), #func)
|
||||
|
||||
#define ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name) \
|
||||
ARCH_DEFINE_STATIC_CALL_TRAMP(name, __static_call_return0)
|
||||
|
||||
#define ARCH_DEFINE_STATIC_CALL_RET0_TRAMP(name) \
|
||||
ARCH_DEFINE_STATIC_CALL_TRAMP(name, __static_call_return0)
|
||||
|
||||
#endif /* _ASM_STATIC_CALL_H */
|
||||
|
|
@ -46,6 +46,7 @@ obj-$(CONFIG_MODULES) += module.o module-plts.o
|
|||
obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o
|
||||
obj-$(CONFIG_HARDLOCKUP_DETECTOR_PERF) += watchdog_hld.o
|
||||
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
|
||||
obj-$(CONFIG_HAVE_STATIC_CALL) += static_call.o
|
||||
obj-$(CONFIG_CPU_PM) += sleep.o suspend.o
|
||||
obj-$(CONFIG_KGDB) += kgdb.o
|
||||
obj-$(CONFIG_EFI) += efi.o efi-rt-wrapper.o
|
||||
|
|
|
|||
23
arch/arm64/kernel/static_call.c
Normal file
23
arch/arm64/kernel/static_call.c
Normal file
|
|
@ -0,0 +1,23 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
#include <linux/static_call.h>
|
||||
#include <linux/memory.h>
|
||||
#include <asm/text-patching.h>
|
||||
|
||||
void arch_static_call_transform(void *site, void *tramp, void *func, bool tail)
|
||||
{
|
||||
u64 literal;
|
||||
int ret;
|
||||
|
||||
if (!func)
|
||||
func = __static_call_return0;
|
||||
|
||||
/* decode the instructions to discover the literal address */
|
||||
literal = ALIGN_DOWN((u64)tramp + 4, SZ_4K) +
|
||||
aarch64_insn_adrp_get_offset(le32_to_cpup(tramp + 4)) +
|
||||
8 * aarch64_insn_decode_immediate(AARCH64_INSN_IMM_12,
|
||||
le32_to_cpup(tramp + 8));
|
||||
|
||||
ret = aarch64_insn_write_literal_u64((void *)literal, (u64)func);
|
||||
WARN_ON_ONCE(ret);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(arch_static_call_transform);
|
||||
|
|
@ -191,6 +191,7 @@ SECTIONS
|
|||
LOCK_TEXT
|
||||
KPROBES_TEXT
|
||||
HYPERVISOR_TEXT
|
||||
STATIC_CALL_TEXT
|
||||
*(.gnu.warning)
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -1753,7 +1753,7 @@ int __kvm_at_swap_desc(struct kvm *kvm, gpa_t ipa, u64 old, u64 new)
|
|||
if (!writable)
|
||||
return -EPERM;
|
||||
|
||||
ptep = (u64 __user *)hva + offset;
|
||||
ptep = (void __user *)hva + offset;
|
||||
if (cpus_have_final_cap(ARM64_HAS_LSE_ATOMICS))
|
||||
r = __lse_swap_desc(ptep, old, new);
|
||||
else
|
||||
|
|
|
|||
|
|
@ -247,6 +247,20 @@ void kvm_reset_vcpu(struct kvm_vcpu *vcpu)
|
|||
kvm_vcpu_set_be(vcpu);
|
||||
|
||||
*vcpu_pc(vcpu) = target_pc;
|
||||
|
||||
/*
|
||||
* We may come from a state where either a PC update was
|
||||
* pending (SMC call resulting in PC being incremented to
|
||||
* skip the SMC) or a pending exception. Make sure we get
|
||||
* rid of all that, as this cannot be valid out of reset.
|
||||
*
|
||||
* Note that clearing the exception mask also clears PC
|
||||
* updates, but that's an implementation detail, and we
|
||||
* really want to make it explicit.
|
||||
*/
|
||||
vcpu_clear_flag(vcpu, PENDING_EXCEPTION);
|
||||
vcpu_clear_flag(vcpu, EXCEPT_MASK);
|
||||
vcpu_clear_flag(vcpu, INCREMENT_PC);
|
||||
vcpu_set_reg(vcpu, 0, reset_state.r0);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -41,4 +41,40 @@
|
|||
.cfi_endproc; \
|
||||
SYM_END(name, SYM_T_NONE)
|
||||
|
||||
/*
|
||||
* This is for the signal handler trampoline, which is used as the return
|
||||
* address of the signal handlers in userspace instead of called normally.
|
||||
* The long standing libgcc bug https://gcc.gnu.org/PR124050 requires a
|
||||
* nop between .cfi_startproc and the actual address of the trampoline, so
|
||||
* we cannot simply use SYM_FUNC_START.
|
||||
*
|
||||
* This wrapper also contains all the .cfi_* directives for recovering
|
||||
* the content of the GPRs and the "return address" (where the rt_sigreturn
|
||||
* syscall will jump to), assuming there is a struct rt_sigframe (where
|
||||
* a struct sigcontext containing those information we need to recover) at
|
||||
* $sp. The "DWARF for the LoongArch(TM) Architecture" manual states
|
||||
* column 0 is for $zero, but it does not make too much sense to
|
||||
* save/restore the hardware zero register. Repurpose this column here
|
||||
* for the return address (since it is not the content of $ra, we cannot use
* the default column 3).
|
||||
*/
|
||||
#define SYM_SIGFUNC_START(name) \
|
||||
.cfi_startproc; \
|
||||
.cfi_signal_frame; \
|
||||
.cfi_def_cfa 3, RT_SIGFRAME_SC; \
|
||||
.cfi_return_column 0; \
|
||||
.cfi_offset 0, SC_PC; \
|
||||
\
|
||||
.irp num, 1, 2, 3, 4, 5, 6, 7, 8, \
|
||||
9, 10, 11, 12, 13, 14, 15, 16, \
|
||||
17, 18, 19, 20, 21, 22, 23, 24, \
|
||||
25, 26, 27, 28, 29, 30, 31; \
|
||||
.cfi_offset \num, SC_REGS + \num * SZREG; \
|
||||
.endr; \
|
||||
\
|
||||
nop; \
|
||||
SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
|
||||
|
||||
#define SYM_SIGFUNC_END(name) SYM_FUNC_END(name)
|
||||
|
||||
#endif
|
||||
|
|
|
|||
9
arch/loongarch/include/asm/sigframe.h
Normal file
9
arch/loongarch/include/asm/sigframe.h
Normal file
|
|
@ -0,0 +1,9 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0+ */
|
||||
|
||||
#include <asm/siginfo.h>
|
||||
#include <asm/ucontext.h>
|
||||
|
||||
struct rt_sigframe {
|
||||
struct siginfo rs_info;
|
||||
struct ucontext rs_uctx;
|
||||
};
|
||||
|
|
@ -16,6 +16,7 @@
|
|||
#include <asm/ptrace.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/ftrace.h>
|
||||
#include <asm/sigframe.h>
|
||||
#include <vdso/datapage.h>
|
||||
|
||||
static void __used output_ptreg_defines(void)
|
||||
|
|
@ -220,6 +221,7 @@ static void __used output_sc_defines(void)
|
|||
COMMENT("Linux sigcontext offsets.");
|
||||
OFFSET(SC_REGS, sigcontext, sc_regs);
|
||||
OFFSET(SC_PC, sigcontext, sc_pc);
|
||||
OFFSET(RT_SIGFRAME_SC, rt_sigframe, rs_uctx.uc_mcontext);
|
||||
BLANK();
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -42,16 +42,15 @@ static int __init init_cpu_fullname(void)
|
|||
int cpu, ret;
|
||||
char *cpuname;
|
||||
const char *model;
|
||||
struct device_node *root;
|
||||
|
||||
/* Parsing cpuname from DTS model property */
|
||||
root = of_find_node_by_path("/");
|
||||
ret = of_property_read_string(root, "model", &model);
|
||||
ret = of_property_read_string(of_root, "model", &model);
|
||||
if (ret == 0) {
|
||||
cpuname = kstrdup(model, GFP_KERNEL);
|
||||
if (!cpuname)
|
||||
return -ENOMEM;
|
||||
loongson_sysconf.cpuname = strsep(&cpuname, " ");
|
||||
}
|
||||
of_node_put(root);
|
||||
|
||||
if (loongson_sysconf.cpuname && !strncmp(loongson_sysconf.cpuname, "Loongson", 8)) {
|
||||
for (cpu = 0; cpu < NR_CPUS; cpu++)
|
||||
|
|
|
|||
|
|
@ -35,6 +35,7 @@
|
|||
#include <asm/cpu-features.h>
|
||||
#include <asm/fpu.h>
|
||||
#include <asm/lbt.h>
|
||||
#include <asm/sigframe.h>
|
||||
#include <asm/ucontext.h>
|
||||
#include <asm/vdso.h>
|
||||
|
||||
|
|
@ -51,11 +52,6 @@
|
|||
#define lock_lbt_owner() ({ preempt_disable(); pagefault_disable(); })
|
||||
#define unlock_lbt_owner() ({ pagefault_enable(); preempt_enable(); })
|
||||
|
||||
struct rt_sigframe {
|
||||
struct siginfo rs_info;
|
||||
struct ucontext rs_uctx;
|
||||
};
|
||||
|
||||
struct _ctx_layout {
|
||||
struct sctx_info *addr;
|
||||
unsigned int size;
|
||||
|
|
|
|||
|
|
@ -83,7 +83,7 @@ static inline void eiointc_update_sw_coremap(struct loongarch_eiointc *s,
|
|||
|
||||
if (!(s->status & BIT(EIOINTC_ENABLE_CPU_ENCODE))) {
|
||||
cpuid = ffs(cpuid) - 1;
|
||||
cpuid = (cpuid >= 4) ? 0 : cpuid;
|
||||
cpuid = ((cpuid < 0) || (cpuid >= 4)) ? 0 : cpuid;
|
||||
}
|
||||
|
||||
vcpu = kvm_get_vcpu_by_cpuid(s->kvm, cpuid);
|
||||
|
|
@ -472,34 +472,34 @@ static int kvm_eiointc_regs_access(struct kvm_device *dev,
|
|||
switch (addr) {
|
||||
case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
|
||||
offset = (addr - EIOINTC_NODETYPE_START) / 4;
|
||||
p = s->nodetype + offset * 4;
|
||||
p = (void *)s->nodetype + offset * 4;
|
||||
break;
|
||||
case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
|
||||
offset = (addr - EIOINTC_IPMAP_START) / 4;
|
||||
p = &s->ipmap + offset * 4;
|
||||
p = (void *)&s->ipmap + offset * 4;
|
||||
break;
|
||||
case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
|
||||
offset = (addr - EIOINTC_ENABLE_START) / 4;
|
||||
p = s->enable + offset * 4;
|
||||
p = (void *)s->enable + offset * 4;
|
||||
break;
|
||||
case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
|
||||
offset = (addr - EIOINTC_BOUNCE_START) / 4;
|
||||
p = s->bounce + offset * 4;
|
||||
p = (void *)s->bounce + offset * 4;
|
||||
break;
|
||||
case EIOINTC_ISR_START ... EIOINTC_ISR_END:
|
||||
offset = (addr - EIOINTC_ISR_START) / 4;
|
||||
p = s->isr + offset * 4;
|
||||
p = (void *)s->isr + offset * 4;
|
||||
break;
|
||||
case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
|
||||
if (cpu >= s->num_cpu)
|
||||
return -EINVAL;
|
||||
|
||||
offset = (addr - EIOINTC_COREISR_START) / 4;
|
||||
p = s->coreisr[cpu] + offset * 4;
|
||||
p = (void *)s->coreisr[cpu] + offset * 4;
|
||||
break;
|
||||
case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
|
||||
offset = (addr - EIOINTC_COREMAP_START) / 4;
|
||||
p = s->coremap + offset * 4;
|
||||
p = (void *)s->coremap + offset * 4;
|
||||
break;
|
||||
default:
|
||||
kvm_err("%s: unknown eiointc register, addr = %d\n", __func__, addr);
|
||||
|
|
|
|||
|
|
@ -588,6 +588,9 @@ struct kvm_vcpu *kvm_get_vcpu_by_cpuid(struct kvm *kvm, int cpuid)
|
|||
{
|
||||
struct kvm_phyid_map *map;
|
||||
|
||||
if (cpuid < 0)
|
||||
return NULL;
|
||||
|
||||
if (cpuid >= KVM_MAX_PHYID)
|
||||
return NULL;
|
||||
|
||||
|
|
|
|||
|
|
@ -5,9 +5,11 @@
|
|||
#include <linux/kernel.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/acpi.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/vgaarb.h>
|
||||
#include <linux/io-64-nonatomic-lo-hi.h>
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/loongson.h>
|
||||
|
||||
|
|
@ -15,6 +17,9 @@
|
|||
#define PCI_DEVICE_ID_LOONGSON_DC1 0x7a06
|
||||
#define PCI_DEVICE_ID_LOONGSON_DC2 0x7a36
|
||||
#define PCI_DEVICE_ID_LOONGSON_DC3 0x7a46
|
||||
#define PCI_DEVICE_ID_LOONGSON_GPU1 0x7a15
|
||||
#define PCI_DEVICE_ID_LOONGSON_GPU2 0x7a25
|
||||
#define PCI_DEVICE_ID_LOONGSON_GPU3 0x7a35
|
||||
|
||||
int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
|
||||
int reg, int len, u32 *val)
|
||||
|
|
@ -99,3 +104,78 @@ static void pci_fixup_vgadev(struct pci_dev *pdev)
|
|||
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, PCI_DEVICE_ID_LOONGSON_DC1, pci_fixup_vgadev);
|
||||
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, PCI_DEVICE_ID_LOONGSON_DC2, pci_fixup_vgadev);
|
||||
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, PCI_DEVICE_ID_LOONGSON_DC3, pci_fixup_vgadev);
|
||||
|
||||
#define CRTC_NUM_MAX 2
|
||||
#define CRTC_OUTPUT_ENABLE 0x100
|
||||
|
||||
static void loongson_gpu_fixup_dma_hang(struct pci_dev *pdev, bool on)
|
||||
{
|
||||
u32 i, val, count, crtc_offset, device;
|
||||
void __iomem *crtc_reg, *base, *regbase;
|
||||
static u32 crtc_status[CRTC_NUM_MAX] = { 0 };
|
||||
|
||||
base = pdev->bus->ops->map_bus(pdev->bus, pdev->devfn + 1, 0);
|
||||
device = readw(base + PCI_DEVICE_ID);
|
||||
|
||||
regbase = ioremap(readq(base + PCI_BASE_ADDRESS_0) & ~0xffull, SZ_64K);
|
||||
if (!regbase) {
|
||||
pci_err(pdev, "Failed to ioremap()\n");
|
||||
return;
|
||||
}
|
||||
|
||||
switch (device) {
|
||||
case PCI_DEVICE_ID_LOONGSON_DC2:
|
||||
crtc_reg = regbase + 0x1240;
|
||||
crtc_offset = 0x10;
|
||||
break;
|
||||
case PCI_DEVICE_ID_LOONGSON_DC3:
|
||||
crtc_reg = regbase;
|
||||
crtc_offset = 0x400;
|
||||
break;
|
||||
}
|
||||
|
||||
for (i = 0; i < CRTC_NUM_MAX; i++, crtc_reg += crtc_offset) {
|
||||
val = readl(crtc_reg);
|
||||
|
||||
if (!on)
|
||||
crtc_status[i] = val;
|
||||
|
||||
/* No need to fixup if the status is off at startup. */
|
||||
if (!(crtc_status[i] & CRTC_OUTPUT_ENABLE))
|
||||
continue;
|
||||
|
||||
if (on)
|
||||
val |= CRTC_OUTPUT_ENABLE;
|
||||
else
|
||||
val &= ~CRTC_OUTPUT_ENABLE;
|
||||
|
||||
mb();
|
||||
writel(val, crtc_reg);
|
||||
|
||||
for (count = 0; count < 40; count++) {
|
||||
val = readl(crtc_reg) & CRTC_OUTPUT_ENABLE;
|
||||
if ((on && val) || (!on && !val))
|
||||
break;
|
||||
udelay(1000);
|
||||
}
|
||||
|
||||
pci_info(pdev, "DMA hang fixup at reg[0x%lx]: 0x%x\n",
|
||||
(unsigned long)crtc_reg & 0xffff, readl(crtc_reg));
|
||||
}
|
||||
|
||||
iounmap(regbase);
|
||||
}
|
||||
|
||||
static void pci_fixup_dma_hang_early(struct pci_dev *pdev)
|
||||
{
|
||||
loongson_gpu_fixup_dma_hang(pdev, false);
|
||||
}
|
||||
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON, PCI_DEVICE_ID_LOONGSON_GPU2, pci_fixup_dma_hang_early);
|
||||
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON, PCI_DEVICE_ID_LOONGSON_GPU3, pci_fixup_dma_hang_early);
|
||||
|
||||
static void pci_fixup_dma_hang_final(struct pci_dev *pdev)
|
||||
{
|
||||
loongson_gpu_fixup_dma_hang(pdev, true);
|
||||
}
|
||||
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, PCI_DEVICE_ID_LOONGSON_GPU2, pci_fixup_dma_hang_final);
|
||||
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, PCI_DEVICE_ID_LOONGSON_GPU3, pci_fixup_dma_hang_final);
|
||||
|
|
|
|||
|
|
@ -26,7 +26,7 @@ cflags-vdso := $(ccflags-vdso) \
|
|||
$(filter -W%,$(filter-out -Wa$(comma)%,$(KBUILD_CFLAGS))) \
|
||||
-std=gnu11 -fms-extensions -O2 -g -fno-strict-aliasing -fno-common -fno-builtin \
|
||||
-fno-stack-protector -fno-jump-tables -DDISABLE_BRANCH_PROFILING \
|
||||
$(call cc-option, -fno-asynchronous-unwind-tables) \
|
||||
$(call cc-option, -fasynchronous-unwind-tables) \
|
||||
$(call cc-option, -fno-stack-protector)
|
||||
aflags-vdso := $(ccflags-vdso) \
|
||||
-D__ASSEMBLY__ -Wa,-gdwarf-2
|
||||
|
|
@ -41,7 +41,7 @@ endif
|
|||
|
||||
# VDSO linker flags.
|
||||
ldflags-y := -Bsymbolic --no-undefined -soname=linux-vdso.so.1 \
|
||||
$(filter -E%,$(KBUILD_CFLAGS)) -shared --build-id -T
|
||||
$(filter -E%,$(KBUILD_CFLAGS)) -shared --build-id --eh-frame-hdr -T
|
||||
|
||||
#
|
||||
# Shared build commands.
|
||||
|
|
|
|||
|
|
@ -12,13 +12,13 @@
|
|||
|
||||
#include <asm/regdef.h>
|
||||
#include <asm/asm.h>
|
||||
#include <asm/asm-offsets.h>
|
||||
|
||||
.section .text
|
||||
.cfi_sections .debug_frame
|
||||
|
||||
SYM_FUNC_START(__vdso_rt_sigreturn)
|
||||
SYM_SIGFUNC_START(__vdso_rt_sigreturn)
|
||||
|
||||
li.w a7, __NR_rt_sigreturn
|
||||
syscall 0
|
||||
|
||||
SYM_FUNC_END(__vdso_rt_sigreturn)
|
||||
SYM_SIGFUNC_END(__vdso_rt_sigreturn)
|
||||
|
|
|
|||
|
|
@ -484,7 +484,6 @@
|
|||
# endif
|
||||
# ifndef cpu_vmbits
|
||||
# define cpu_vmbits cpu_data[0].vmbits
|
||||
# define __NEED_VMBITS_PROBE
|
||||
# endif
|
||||
#endif
|
||||
|
||||
|
|
|
|||
|
|
@ -80,9 +80,7 @@ struct cpuinfo_mips {
|
|||
int srsets; /* Shadow register sets */
|
||||
int package;/* physical package number */
|
||||
unsigned int globalnumber;
|
||||
#ifdef CONFIG_64BIT
|
||||
int vmbits; /* Virtual memory size in bits */
|
||||
#endif
|
||||
void *data; /* Additional data */
|
||||
unsigned int watch_reg_count; /* Number that exist */
|
||||
unsigned int watch_reg_use_cnt; /* Usable by ptrace */
|
||||
|
|
|
|||
|
|
@ -1871,6 +1871,8 @@ do { \
|
|||
|
||||
#define read_c0_entryhi() __read_ulong_c0_register($10, 0)
|
||||
#define write_c0_entryhi(val) __write_ulong_c0_register($10, 0, val)
|
||||
#define read_c0_entryhi_64() __read_64bit_c0_register($10, 0)
|
||||
#define write_c0_entryhi_64(val) __write_64bit_c0_register($10, 0, val)
|
||||
|
||||
#define read_c0_guestctl1() __read_32bit_c0_register($10, 4)
|
||||
#define write_c0_guestctl1(val) __write_32bit_c0_register($10, 4, val)
|
||||
|
|
|
|||
|
|
@ -210,11 +210,14 @@ static inline void set_elf_base_platform(const char *plat)
|
|||
|
||||
static inline void cpu_probe_vmbits(struct cpuinfo_mips *c)
|
||||
{
|
||||
#ifdef __NEED_VMBITS_PROBE
|
||||
write_c0_entryhi(0x3fffffffffffe000ULL);
|
||||
back_to_back_c0_hazard();
|
||||
c->vmbits = fls64(read_c0_entryhi() & 0x3fffffffffffe000ULL);
|
||||
#endif
|
||||
int vmbits = 31;
|
||||
|
||||
if (cpu_has_64bits) {
|
||||
write_c0_entryhi_64(0x3fffffffffffe000ULL);
|
||||
back_to_back_c0_hazard();
|
||||
vmbits = fls64(read_c0_entryhi_64() & 0x3fffffffffffe000ULL);
|
||||
}
|
||||
c->vmbits = vmbits;
|
||||
}
|
||||
|
||||
static void set_isa(struct cpuinfo_mips *c, unsigned int isa)
|
||||
|
|
|
|||
|
|
@ -137,6 +137,8 @@ void cpu_probe(void)
|
|||
else
|
||||
cpu_set_nofpu_opts(c);
|
||||
|
||||
c->vmbits = 31;
|
||||
|
||||
reserve_exception_space(0, 0x400);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -4,12 +4,12 @@
|
|||
#include "libgcc.h"
|
||||
|
||||
/*
|
||||
* GCC 7 & older can suboptimally generate __multi3 calls for mips64r6, so for
|
||||
* GCC 9 & older can suboptimally generate __multi3 calls for mips64r6, so for
|
||||
* that specific case only we implement that intrinsic here.
|
||||
*
|
||||
* See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82981
|
||||
*/
|
||||
#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) && (__GNUC__ < 8)
|
||||
#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) && (__GNUC__ < 10)
|
||||
|
||||
/* multiply 64-bit values, low 64-bits returned */
|
||||
static inline long long notrace dmulu(long long a, long long b)
|
||||
|
|
@ -51,4 +51,4 @@ ti_type notrace __multi3(ti_type a, ti_type b)
|
|||
}
|
||||
EXPORT_SYMBOL(__multi3);
|
||||
|
||||
#endif /* 64BIT && CPU_MIPSR6 && GCC7 */
|
||||
#endif /* 64BIT && CPU_MIPSR6 && GCC9 */
|
||||
|
|
|
|||
|
|
@ -17,7 +17,9 @@
|
|||
#include <linux/dma-map-ops.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/libfdt.h>
|
||||
#include <linux/minmax.h>
|
||||
#include <linux/pci_ids.h>
|
||||
#include <linux/serial_core.h>
|
||||
#include <linux/string_choices.h>
|
||||
#include <asm/bootinfo.h>
|
||||
#include <loongson.h>
|
||||
|
|
@ -106,9 +108,23 @@ static void __init lefi_fixup_fdt(struct system_loongson *system)
|
|||
|
||||
is_loongson64g = (read_c0_prid() & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64G;
|
||||
|
||||
for (i = 0; i < system->nr_uarts; i++) {
|
||||
for (i = 0; i < min(system->nr_uarts, MAX_UARTS); i++) {
|
||||
uartdev = &system->uarts[i];
|
||||
|
||||
/*
|
||||
* Some firmware does not set nr_uarts properly and passes empty
|
||||
* items. Ignore them silently.
|
||||
*/
|
||||
if (uartdev->uart_base == 0)
|
||||
continue;
|
||||
|
||||
/* Our DT only works with UPIO_MEM. */
|
||||
if (uartdev->iotype != UPIO_MEM) {
|
||||
pr_warn("Ignore UART 0x%llx with iotype %u passed by firmware\n",
|
||||
uartdev->uart_base, uartdev->iotype);
|
||||
continue;
|
||||
}
|
||||
|
||||
ret = lefi_fixup_fdt_serial(fdt_buf, uartdev->uart_base,
|
||||
uartdev->uartclk);
|
||||
/*
|
||||
|
|
|
|||
|
|
@ -207,7 +207,8 @@ void cpu_cache_init(void)
|
|||
{
|
||||
if (IS_ENABLED(CONFIG_CPU_R3000) && cpu_has_3k_cache)
|
||||
r3k_cache_init();
|
||||
if (IS_ENABLED(CONFIG_CPU_R4K_CACHE_TLB) && cpu_has_4k_cache)
|
||||
if ((IS_ENABLED(CONFIG_CPU_R4K_CACHE_TLB) ||
|
||||
IS_ENABLED(CONFIG_CPU_SB1)) && cpu_has_4k_cache)
|
||||
r4k_cache_init();
|
||||
|
||||
if (IS_ENABLED(CONFIG_CPU_CAVIUM_OCTEON) && cpu_has_octeon_cache)
|
||||
|
|
|
|||
|
|
@ -13,6 +13,7 @@
|
|||
#include <linux/sched.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/memblock.h>
|
||||
#include <linux/minmax.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/hugetlb.h>
|
||||
#include <linux/export.h>
|
||||
|
|
@ -24,6 +25,7 @@
|
|||
#include <asm/hazards.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/tlb.h>
|
||||
#include <asm/tlbdebug.h>
|
||||
#include <asm/tlbex.h>
|
||||
#include <asm/tlbmisc.h>
|
||||
#include <asm/setup.h>
|
||||
|
|
@ -511,12 +513,229 @@ static int __init set_ntlb(char *str)
|
|||
__setup("ntlb=", set_ntlb);
|
||||
|
||||
|
||||
/* Comparison function for EntryHi VPN fields. */
|
||||
static int r4k_vpn_cmp(const void *a, const void *b)
|
||||
/* The start bit position of VPN2 and Mask in EntryHi/PageMask registers. */
|
||||
#define VPN2_SHIFT 13
|
||||
|
||||
/* Read full EntryHi even with CONFIG_32BIT. */
|
||||
static inline unsigned long long read_c0_entryhi_native(void)
|
||||
{
|
||||
long v = *(unsigned long *)a - *(unsigned long *)b;
|
||||
int s = sizeof(long) > sizeof(int) ? sizeof(long) * 8 - 1: 0;
|
||||
return s ? (v != 0) | v >> s : v;
|
||||
return cpu_has_64bits ? read_c0_entryhi_64() : read_c0_entryhi();
|
||||
}
|
||||
|
||||
/* Write full EntryHi even with CONFIG_32BIT. */
|
||||
static inline void write_c0_entryhi_native(unsigned long long v)
|
||||
{
|
||||
if (cpu_has_64bits)
|
||||
write_c0_entryhi_64(v);
|
||||
else
|
||||
write_c0_entryhi(v);
|
||||
}
|
||||
|
||||
/* TLB entry state for uniquification. */
|
||||
struct tlbent {
|
||||
unsigned long long wired:1;
|
||||
unsigned long long global:1;
|
||||
unsigned long long asid:10;
|
||||
unsigned long long vpn:51;
|
||||
unsigned long long pagesz:5;
|
||||
unsigned long long index:14;
|
||||
};
|
||||
|
||||
/*
|
||||
* Comparison function for TLB entry sorting. Place wired entries first,
|
||||
* then global entries, then order by the increasing VPN/ASID and the
|
||||
* decreasing page size. This lets us avoid clashes with wired entries
|
||||
* easily and get entries for larger pages out of the way first.
|
||||
*
|
||||
* We could group bits so as to reduce the number of comparisons, but this
|
||||
* is seldom executed and not performance-critical, so prefer legibility.
|
||||
*/
|
||||
static int r4k_entry_cmp(const void *a, const void *b)
|
||||
{
|
||||
struct tlbent ea = *(struct tlbent *)a, eb = *(struct tlbent *)b;
|
||||
|
||||
if (ea.wired > eb.wired)
|
||||
return -1;
|
||||
else if (ea.wired < eb.wired)
|
||||
return 1;
|
||||
else if (ea.global > eb.global)
|
||||
return -1;
|
||||
else if (ea.global < eb.global)
|
||||
return 1;
|
||||
else if (ea.vpn < eb.vpn)
|
||||
return -1;
|
||||
else if (ea.vpn > eb.vpn)
|
||||
return 1;
|
||||
else if (ea.asid < eb.asid)
|
||||
return -1;
|
||||
else if (ea.asid > eb.asid)
|
||||
return 1;
|
||||
else if (ea.pagesz > eb.pagesz)
|
||||
return -1;
|
||||
else if (ea.pagesz < eb.pagesz)
|
||||
return 1;
|
||||
else
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Fetch all the TLB entries. Mask individual VPN values retrieved with
|
||||
* the corresponding page mask and ignoring any 1KiB extension as we'll
|
||||
* be using 4KiB pages for uniquification.
|
||||
*/
|
||||
static void __ref r4k_tlb_uniquify_read(struct tlbent *tlb_vpns, int tlbsize)
|
||||
{
|
||||
int start = num_wired_entries();
|
||||
unsigned long long vpn_mask;
|
||||
bool global;
|
||||
int i;
|
||||
|
||||
vpn_mask = GENMASK(current_cpu_data.vmbits - 1, VPN2_SHIFT);
|
||||
vpn_mask |= cpu_has_64bits ? 3ULL << 62 : 1 << 31;
|
||||
|
||||
for (i = 0; i < tlbsize; i++) {
|
||||
unsigned long long entryhi, vpn, mask, asid;
|
||||
unsigned int pagesz;
|
||||
|
||||
write_c0_index(i);
|
||||
mtc0_tlbr_hazard();
|
||||
tlb_read();
|
||||
tlb_read_hazard();
|
||||
|
||||
global = !!(read_c0_entrylo0() & ENTRYLO_G);
|
||||
entryhi = read_c0_entryhi_native();
|
||||
mask = read_c0_pagemask();
|
||||
|
||||
asid = entryhi & cpu_asid_mask(&current_cpu_data);
|
||||
vpn = (entryhi & vpn_mask & ~mask) >> VPN2_SHIFT;
|
||||
pagesz = ilog2((mask >> VPN2_SHIFT) + 1);
|
||||
|
||||
tlb_vpns[i].global = global;
|
||||
tlb_vpns[i].asid = global ? 0 : asid;
|
||||
tlb_vpns[i].vpn = vpn;
|
||||
tlb_vpns[i].pagesz = pagesz;
|
||||
tlb_vpns[i].wired = i < start;
|
||||
tlb_vpns[i].index = i;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Write unique values to all but the wired TLB entries each, using
|
||||
* the 4KiB page size. This size might not be supported with R6, but
|
||||
* EHINV is mandatory for R6, so we won't ever be called in that case.
|
||||
*
|
||||
* A sorted table is supplied with any wired entries at the beginning,
|
||||
* followed by any global entries, and then finally regular entries.
|
||||
* We start at the VPN and ASID values of zero and only assign user
|
||||
* addresses, therefore guaranteeing no clash with addresses produced
|
||||
* by UNIQUE_ENTRYHI. We avoid any VPN values used by wired or global
|
||||
* entries, by increasing the VPN value beyond the span of such entry.
|
||||
*
|
||||
* When a VPN/ASID clash is found with a regular entry we increment the
|
||||
* ASID instead until no VPN/ASID clash has been found or the ASID space
|
||||
* has been exhausted, in which case we increase the VPN value beyond
|
||||
* the span of the largest clashing entry.
|
||||
*
|
||||
* We do not need to be concerned about FTLB or MMID configurations as
|
||||
* those are required to implement the EHINV feature.
|
||||
*/
|
||||
static void __ref r4k_tlb_uniquify_write(struct tlbent *tlb_vpns, int tlbsize)
|
||||
{
|
||||
unsigned long long asid, vpn, vpn_size, pagesz;
|
||||
int widx, gidx, idx, sidx, lidx, i;
|
||||
|
||||
vpn_size = 1ULL << (current_cpu_data.vmbits - VPN2_SHIFT);
|
||||
pagesz = ilog2((PM_4K >> VPN2_SHIFT) + 1);
|
||||
|
||||
write_c0_pagemask(PM_4K);
|
||||
write_c0_entrylo0(0);
|
||||
write_c0_entrylo1(0);
|
||||
|
||||
asid = 0;
|
||||
vpn = 0;
|
||||
widx = 0;
|
||||
gidx = 0;
|
||||
for (sidx = 0; sidx < tlbsize && tlb_vpns[sidx].wired; sidx++)
|
||||
;
|
||||
for (lidx = sidx; lidx < tlbsize && tlb_vpns[lidx].global; lidx++)
|
||||
;
|
||||
idx = gidx = sidx + 1;
|
||||
for (i = sidx; i < tlbsize; i++) {
|
||||
unsigned long long entryhi, vpn_pagesz = 0;
|
||||
|
||||
while (1) {
|
||||
if (WARN_ON(vpn >= vpn_size)) {
|
||||
dump_tlb_all();
|
||||
/* Pray local_flush_tlb_all() will cope. */
|
||||
return;
|
||||
}
|
||||
|
||||
/* VPN must be below the next wired entry. */
|
||||
if (widx < sidx && vpn >= tlb_vpns[widx].vpn) {
|
||||
vpn = max(vpn,
|
||||
(tlb_vpns[widx].vpn +
|
||||
(1ULL << tlb_vpns[widx].pagesz)));
|
||||
asid = 0;
|
||||
widx++;
|
||||
continue;
|
||||
}
|
||||
/* VPN must be below the next global entry. */
|
||||
if (gidx < lidx && vpn >= tlb_vpns[gidx].vpn) {
|
||||
vpn = max(vpn,
|
||||
(tlb_vpns[gidx].vpn +
|
||||
(1ULL << tlb_vpns[gidx].pagesz)));
|
||||
asid = 0;
|
||||
gidx++;
|
||||
continue;
|
||||
}
|
||||
/* Try to find a free ASID so as to conserve VPNs. */
|
||||
if (idx < tlbsize && vpn == tlb_vpns[idx].vpn &&
|
||||
asid == tlb_vpns[idx].asid) {
|
||||
unsigned long long idx_pagesz;
|
||||
|
||||
idx_pagesz = tlb_vpns[idx].pagesz;
|
||||
vpn_pagesz = max(vpn_pagesz, idx_pagesz);
|
||||
do
|
||||
idx++;
|
||||
while (idx < tlbsize &&
|
||||
vpn == tlb_vpns[idx].vpn &&
|
||||
asid == tlb_vpns[idx].asid);
|
||||
asid++;
|
||||
if (asid > cpu_asid_mask(&current_cpu_data)) {
|
||||
vpn += vpn_pagesz;
|
||||
asid = 0;
|
||||
vpn_pagesz = 0;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
/* VPN mustn't be above the next regular entry. */
|
||||
if (idx < tlbsize && vpn > tlb_vpns[idx].vpn) {
|
||||
vpn = max(vpn,
|
||||
(tlb_vpns[idx].vpn +
|
||||
(1ULL << tlb_vpns[idx].pagesz)));
|
||||
asid = 0;
|
||||
idx++;
|
||||
continue;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
entryhi = (vpn << VPN2_SHIFT) | asid;
|
||||
write_c0_entryhi_native(entryhi);
|
||||
write_c0_index(tlb_vpns[i].index);
|
||||
mtc0_tlbw_hazard();
|
||||
tlb_write_indexed();
|
||||
|
||||
tlb_vpns[i].asid = asid;
|
||||
tlb_vpns[i].vpn = vpn;
|
||||
tlb_vpns[i].pagesz = pagesz;
|
||||
|
||||
asid++;
|
||||
if (asid > cpu_asid_mask(&current_cpu_data)) {
|
||||
vpn += 1ULL << pagesz;
|
||||
asid = 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
@ -527,70 +746,25 @@ static void __ref r4k_tlb_uniquify(void)
|
|||
{
|
||||
int tlbsize = current_cpu_data.tlbsize;
|
||||
bool use_slab = slab_is_available();
|
||||
int start = num_wired_entries();
|
||||
phys_addr_t tlb_vpn_size;
|
||||
unsigned long *tlb_vpns;
|
||||
unsigned long vpn_mask;
|
||||
int cnt, ent, idx, i;
|
||||
|
||||
vpn_mask = GENMASK(cpu_vmbits - 1, 13);
|
||||
vpn_mask |= IS_ENABLED(CONFIG_64BIT) ? 3ULL << 62 : 1 << 31;
|
||||
struct tlbent *tlb_vpns;
|
||||
|
||||
tlb_vpn_size = tlbsize * sizeof(*tlb_vpns);
|
||||
tlb_vpns = (use_slab ?
|
||||
kmalloc(tlb_vpn_size, GFP_KERNEL) :
|
||||
kmalloc(tlb_vpn_size, GFP_ATOMIC) :
|
||||
memblock_alloc_raw(tlb_vpn_size, sizeof(*tlb_vpns)));
|
||||
if (WARN_ON(!tlb_vpns))
|
||||
return; /* Pray local_flush_tlb_all() is good enough. */
|
||||
|
||||
htw_stop();
|
||||
|
||||
for (i = start, cnt = 0; i < tlbsize; i++, cnt++) {
|
||||
unsigned long vpn;
|
||||
r4k_tlb_uniquify_read(tlb_vpns, tlbsize);
|
||||
|
||||
write_c0_index(i);
|
||||
mtc0_tlbr_hazard();
|
||||
tlb_read();
|
||||
tlb_read_hazard();
|
||||
vpn = read_c0_entryhi();
|
||||
vpn &= vpn_mask & PAGE_MASK;
|
||||
tlb_vpns[cnt] = vpn;
|
||||
sort(tlb_vpns, tlbsize, sizeof(*tlb_vpns), r4k_entry_cmp, NULL);
|
||||
|
||||
/* Prevent any large pages from overlapping regular ones. */
|
||||
write_c0_pagemask(read_c0_pagemask() & PM_DEFAULT_MASK);
|
||||
mtc0_tlbw_hazard();
|
||||
tlb_write_indexed();
|
||||
tlbw_use_hazard();
|
||||
}
|
||||
|
||||
sort(tlb_vpns, cnt, sizeof(tlb_vpns[0]), r4k_vpn_cmp, NULL);
|
||||
r4k_tlb_uniquify_write(tlb_vpns, tlbsize);
|
||||
|
||||
write_c0_pagemask(PM_DEFAULT_MASK);
|
||||
write_c0_entrylo0(0);
|
||||
write_c0_entrylo1(0);
|
||||
|
||||
idx = 0;
|
||||
ent = tlbsize;
|
||||
for (i = start; i < tlbsize; i++)
|
||||
while (1) {
|
||||
unsigned long entryhi, vpn;
|
||||
|
||||
entryhi = UNIQUE_ENTRYHI(ent);
|
||||
vpn = entryhi & vpn_mask & PAGE_MASK;
|
||||
|
||||
if (idx >= cnt || vpn < tlb_vpns[idx]) {
|
||||
write_c0_entryhi(entryhi);
|
||||
write_c0_index(i);
|
||||
mtc0_tlbw_hazard();
|
||||
tlb_write_indexed();
|
||||
ent++;
|
||||
break;
|
||||
} else if (vpn == tlb_vpns[idx]) {
|
||||
ent++;
|
||||
} else {
|
||||
idx++;
|
||||
}
|
||||
}
|
||||
|
||||
tlbw_use_hazard();
|
||||
htw_start();
|
||||
|
|
@ -640,7 +814,8 @@ static void r4k_tlb_configure(void)
|
|||
temp_tlb_entry = current_cpu_data.tlbsize - 1;
|
||||
|
||||
/* From this point on the ARC firmware is dead. */
|
||||
r4k_tlb_uniquify();
|
||||
if (!cpu_has_tlbinv)
|
||||
r4k_tlb_uniquify();
|
||||
local_flush_tlb_all();
|
||||
|
||||
/* Did I tell you that ARC SUCKS? */
|
||||
|
|
|
|||
|
|
@ -21,16 +21,16 @@ static const char *clk_cpu(int *idx)
|
|||
{
|
||||
switch (ralink_soc) {
|
||||
case RT2880_SOC:
|
||||
*idx = 0;
|
||||
*idx = 1;
|
||||
return "ralink,rt2880-sysc";
|
||||
case RT3883_SOC:
|
||||
*idx = 0;
|
||||
*idx = 1;
|
||||
return "ralink,rt3883-sysc";
|
||||
case RT305X_SOC_RT3050:
|
||||
*idx = 0;
|
||||
*idx = 1;
|
||||
return "ralink,rt3050-sysc";
|
||||
case RT305X_SOC_RT3052:
|
||||
*idx = 0;
|
||||
*idx = 1;
|
||||
return "ralink,rt3052-sysc";
|
||||
case RT305X_SOC_RT3350:
|
||||
*idx = 1;
|
||||
|
|
|
|||
|
|
@ -67,7 +67,7 @@ bool arch_dma_unmap_sg_direct(struct device *dev, struct scatterlist *sg,
|
|||
}
|
||||
bool arch_dma_alloc_direct(struct device *dev)
|
||||
{
|
||||
if (dev->dma_ops_bypass)
|
||||
if (dev->dma_ops_bypass && dev->bus_dma_limit)
|
||||
return true;
|
||||
|
||||
return false;
|
||||
|
|
@ -75,7 +75,7 @@ bool arch_dma_alloc_direct(struct device *dev)
|
|||
|
||||
bool arch_dma_free_direct(struct device *dev, dma_addr_t dma_handle)
|
||||
{
|
||||
if (!dev->dma_ops_bypass)
|
||||
if (!dev->dma_ops_bypass || !dev->bus_dma_limit)
|
||||
return false;
|
||||
|
||||
return is_direct_handle(dev, dma_handle);
|
||||
|
|
|
|||
|
|
@ -2,6 +2,10 @@
|
|||
#ifndef _ASM_RISCV_RUNTIME_CONST_H
|
||||
#define _ASM_RISCV_RUNTIME_CONST_H
|
||||
|
||||
#ifdef MODULE
|
||||
#error "Cannot use runtime-const infrastructure from modules"
|
||||
#endif
|
||||
|
||||
#include <asm/asm.h>
|
||||
#include <asm/alternative.h>
|
||||
#include <asm/cacheflush.h>
|
||||
|
|
|
|||
|
|
@ -9,6 +9,7 @@
|
|||
#ifndef __ASSEMBLER__
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/const.h>
|
||||
|
||||
#define PTRACE_GETFDPIC 33
|
||||
|
||||
|
|
@ -138,12 +139,12 @@ struct __sc_riscv_cfi_state {
|
|||
#define PTRACE_CFI_SS_LOCK_BIT 4
|
||||
#define PTRACE_CFI_SS_PTR_BIT 5
|
||||
|
||||
#define PTRACE_CFI_LP_EN_STATE BIT(PTRACE_CFI_LP_EN_BIT)
|
||||
#define PTRACE_CFI_LP_LOCK_STATE BIT(PTRACE_CFI_LP_LOCK_BIT)
|
||||
#define PTRACE_CFI_ELP_STATE BIT(PTRACE_CFI_ELP_BIT)
|
||||
#define PTRACE_CFI_SS_EN_STATE BIT(PTRACE_CFI_SS_EN_BIT)
|
||||
#define PTRACE_CFI_SS_LOCK_STATE BIT(PTRACE_CFI_SS_LOCK_BIT)
|
||||
#define PTRACE_CFI_SS_PTR_STATE BIT(PTRACE_CFI_SS_PTR_BIT)
|
||||
#define PTRACE_CFI_LP_EN_STATE _BITUL(PTRACE_CFI_LP_EN_BIT)
|
||||
#define PTRACE_CFI_LP_LOCK_STATE _BITUL(PTRACE_CFI_LP_LOCK_BIT)
|
||||
#define PTRACE_CFI_ELP_STATE _BITUL(PTRACE_CFI_ELP_BIT)
|
||||
#define PTRACE_CFI_SS_EN_STATE _BITUL(PTRACE_CFI_SS_EN_BIT)
|
||||
#define PTRACE_CFI_SS_LOCK_STATE _BITUL(PTRACE_CFI_SS_LOCK_BIT)
|
||||
#define PTRACE_CFI_SS_PTR_STATE _BITUL(PTRACE_CFI_SS_PTR_BIT)
|
||||
|
||||
#define PRACE_CFI_STATE_INVALID_MASK ~(PTRACE_CFI_LP_EN_STATE | \
|
||||
PTRACE_CFI_LP_LOCK_STATE | \
|
||||
|
|
|
|||
|
|
@ -175,7 +175,7 @@ struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
|
|||
{DBG_REG_T1, GDB_SIZEOF_REG, offsetof(struct pt_regs, t1)},
|
||||
{DBG_REG_T2, GDB_SIZEOF_REG, offsetof(struct pt_regs, t2)},
|
||||
{DBG_REG_FP, GDB_SIZEOF_REG, offsetof(struct pt_regs, s0)},
|
||||
{DBG_REG_S1, GDB_SIZEOF_REG, offsetof(struct pt_regs, a1)},
|
||||
{DBG_REG_S1, GDB_SIZEOF_REG, offsetof(struct pt_regs, s1)},
|
||||
{DBG_REG_A0, GDB_SIZEOF_REG, offsetof(struct pt_regs, a0)},
|
||||
{DBG_REG_A1, GDB_SIZEOF_REG, offsetof(struct pt_regs, a1)},
|
||||
{DBG_REG_A2, GDB_SIZEOF_REG, offsetof(struct pt_regs, a2)},
|
||||
|
|
@ -244,8 +244,9 @@ sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
|
|||
gdb_regs[DBG_REG_S6_OFF] = task->thread.s[6];
|
||||
gdb_regs[DBG_REG_S7_OFF] = task->thread.s[7];
|
||||
gdb_regs[DBG_REG_S8_OFF] = task->thread.s[8];
|
||||
gdb_regs[DBG_REG_S9_OFF] = task->thread.s[10];
|
||||
gdb_regs[DBG_REG_S10_OFF] = task->thread.s[11];
|
||||
gdb_regs[DBG_REG_S9_OFF] = task->thread.s[9];
|
||||
gdb_regs[DBG_REG_S10_OFF] = task->thread.s[10];
|
||||
gdb_regs[DBG_REG_S11_OFF] = task->thread.s[11];
|
||||
gdb_regs[DBG_REG_EPC_OFF] = task->thread.ra;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -42,19 +42,20 @@ static inline bool is_kernel_exittext(uintptr_t addr)
|
|||
static __always_inline void *patch_map(void *addr, const unsigned int fixmap)
|
||||
{
|
||||
uintptr_t uintaddr = (uintptr_t) addr;
|
||||
struct page *page;
|
||||
phys_addr_t phys;
|
||||
|
||||
if (core_kernel_text(uintaddr) || is_kernel_exittext(uintaddr))
|
||||
page = phys_to_page(__pa_symbol(addr));
|
||||
else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
|
||||
page = vmalloc_to_page(addr);
|
||||
else
|
||||
if (core_kernel_text(uintaddr) || is_kernel_exittext(uintaddr)) {
|
||||
phys = __pa_symbol(addr);
|
||||
} else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX)) {
|
||||
struct page *page = vmalloc_to_page(addr);
|
||||
|
||||
BUG_ON(!page);
|
||||
phys = page_to_phys(page) + offset_in_page(addr);
|
||||
} else {
|
||||
return addr;
|
||||
}
|
||||
|
||||
BUG_ON(!page);
|
||||
|
||||
return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
|
||||
offset_in_page(addr));
|
||||
return (void *)set_fixmap_offset(fixmap, phys);
|
||||
}
|
||||
|
||||
static void patch_unmap(int fixmap)
|
||||
|
|
|
|||
|
|
@ -347,8 +347,10 @@ long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg)
|
|||
if (arg & PR_TAGGED_ADDR_ENABLE && (tagged_addr_disabled || !pmlen))
|
||||
return -EINVAL;
|
||||
|
||||
if (!(arg & PR_TAGGED_ADDR_ENABLE))
|
||||
if (!(arg & PR_TAGGED_ADDR_ENABLE)) {
|
||||
pmlen = PMLEN_0;
|
||||
pmm = ENVCFG_PMM_PMLEN_0;
|
||||
}
|
||||
|
||||
if (mmap_write_lock_killable(mm))
|
||||
return -EINTR;
|
||||
|
|
|
|||
|
|
@ -62,8 +62,8 @@ do { \
|
|||
* @size: number of elements in array
|
||||
*/
|
||||
#define array_index_mask_nospec array_index_mask_nospec
|
||||
static inline unsigned long array_index_mask_nospec(unsigned long index,
|
||||
unsigned long size)
|
||||
static __always_inline unsigned long array_index_mask_nospec(unsigned long index,
|
||||
unsigned long size)
|
||||
{
|
||||
unsigned long mask;
|
||||
|
||||
|
|
|
|||
|
|
@ -710,6 +710,9 @@ void kvm_arch_crypto_clear_masks(struct kvm *kvm);
|
|||
void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
|
||||
unsigned long *aqm, unsigned long *adm);
|
||||
|
||||
#define SIE64_RETURN_NORMAL 0
|
||||
#define SIE64_RETURN_MCCK 1
|
||||
|
||||
int __sie64a(phys_addr_t sie_block_phys, struct kvm_s390_sie_block *sie_block, u64 *rsa,
|
||||
unsigned long gasce);
|
||||
|
||||
|
|
|
|||
|
|
@ -62,7 +62,7 @@ struct stack_frame {
|
|||
struct {
|
||||
unsigned long sie_control_block;
|
||||
unsigned long sie_savearea;
|
||||
unsigned long sie_reason;
|
||||
unsigned long sie_return;
|
||||
unsigned long sie_flags;
|
||||
unsigned long sie_control_block_phys;
|
||||
unsigned long sie_guest_asce;
|
||||
|
|
|
|||
|
|
@ -63,7 +63,7 @@ int main(void)
|
|||
OFFSET(__SF_EMPTY, stack_frame, empty[0]);
|
||||
OFFSET(__SF_SIE_CONTROL, stack_frame, sie_control_block);
|
||||
OFFSET(__SF_SIE_SAVEAREA, stack_frame, sie_savearea);
|
||||
OFFSET(__SF_SIE_REASON, stack_frame, sie_reason);
|
||||
OFFSET(__SF_SIE_RETURN, stack_frame, sie_return);
|
||||
OFFSET(__SF_SIE_FLAGS, stack_frame, sie_flags);
|
||||
OFFSET(__SF_SIE_CONTROL_PHYS, stack_frame, sie_control_block_phys);
|
||||
OFFSET(__SF_SIE_GUEST_ASCE, stack_frame, sie_guest_asce);
|
||||
|
|
|
|||
|
|
@ -200,7 +200,7 @@ SYM_FUNC_START(__sie64a)
|
|||
stg %r3,__SF_SIE_CONTROL(%r15) # ...and virtual addresses
|
||||
stg %r4,__SF_SIE_SAVEAREA(%r15) # save guest register save area
|
||||
stg %r5,__SF_SIE_GUEST_ASCE(%r15) # save guest asce
|
||||
xc __SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0
|
||||
xc __SF_SIE_RETURN(8,%r15),__SF_SIE_RETURN(%r15) # return code = 0
|
||||
mvc __SF_SIE_FLAGS(8,%r15),__TI_flags(%r14) # copy thread flags
|
||||
lmg %r0,%r13,0(%r4) # load guest gprs 0-13
|
||||
mvi __TI_sie(%r14),1
|
||||
|
|
@ -237,7 +237,7 @@ SYM_INNER_LABEL(sie_exit, SYM_L_GLOBAL)
|
|||
xgr %r4,%r4
|
||||
xgr %r5,%r5
|
||||
lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers
|
||||
lg %r2,__SF_SIE_REASON(%r15) # return exit reason code
|
||||
lg %r2,__SF_SIE_RETURN(%r15) # return sie return code
|
||||
BR_EX %r14
|
||||
SYM_FUNC_END(__sie64a)
|
||||
EXPORT_SYMBOL(__sie64a)
|
||||
|
|
@ -271,6 +271,7 @@ SYM_CODE_START(system_call)
|
|||
xgr %r9,%r9
|
||||
xgr %r10,%r10
|
||||
xgr %r11,%r11
|
||||
xgr %r12,%r12
|
||||
la %r2,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
|
||||
mvc __PT_R8(64,%r2),__LC_SAVE_AREA(%r13)
|
||||
MBEAR %r2,%r13
|
||||
|
|
@ -407,6 +408,7 @@ SYM_CODE_START(\name)
|
|||
xgr %r6,%r6
|
||||
xgr %r7,%r7
|
||||
xgr %r10,%r10
|
||||
xgr %r12,%r12
|
||||
xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
|
||||
mvc __PT_R8(64,%r11),__LC_SAVE_AREA(%r13)
|
||||
MBEAR %r11,%r13
|
||||
|
|
@ -496,6 +498,7 @@ SYM_CODE_START(mcck_int_handler)
|
|||
xgr %r6,%r6
|
||||
xgr %r7,%r7
|
||||
xgr %r10,%r10
|
||||
xgr %r12,%r12
|
||||
stmg %r8,%r9,__PT_PSW(%r11)
|
||||
xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
|
||||
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
|
||||
|
|
|
|||
|
|
@ -487,8 +487,8 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
|
|||
mcck_dam_code = (mci.val & MCIC_SUBCLASS_MASK);
|
||||
if (test_cpu_flag(CIF_MCCK_GUEST) &&
|
||||
(mcck_dam_code & MCCK_CODE_NO_GUEST) != mcck_dam_code) {
|
||||
/* Set exit reason code for host's later handling */
|
||||
*((long *)(regs->gprs[15] + __SF_SIE_REASON)) = -EINTR;
|
||||
/* Set sie return code for host's later handling */
|
||||
((struct stack_frame *)regs->gprs[15])->sie_return = SIE64_RETURN_MCCK;
|
||||
}
|
||||
clear_cpu_flag(CIF_MCCK_GUEST);
|
||||
|
||||
|
|
|
|||
|
|
@ -1168,6 +1168,7 @@ static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt,
|
|||
static void hw_perf_event_update(struct perf_event *event, int flush_all)
|
||||
{
|
||||
unsigned long long event_overflow, sampl_overflow, num_sdb;
|
||||
struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
|
||||
struct hw_perf_event *hwc = &event->hw;
|
||||
union hws_trailer_header prev, new;
|
||||
struct hws_trailer_entry *te;
|
||||
|
|
@ -1247,8 +1248,11 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all)
|
|||
* are dropped.
|
||||
* Slightly increase the interval to avoid hitting this limit.
|
||||
*/
|
||||
if (event_overflow)
|
||||
if (event_overflow) {
|
||||
SAMPL_RATE(hwc) += DIV_ROUND_UP(SAMPL_RATE(hwc), 10);
|
||||
if (SAMPL_RATE(hwc) > cpuhw->qsi.max_sampl_rate)
|
||||
SAMPL_RATE(hwc) = cpuhw->qsi.max_sampl_rate;
|
||||
}
|
||||
}
|
||||
|
||||
static inline unsigned long aux_sdb_index(struct aux_buffer *aux,
|
||||
|
|
|
|||
|
|
@ -13,6 +13,7 @@
|
|||
*/
|
||||
|
||||
#include <linux/cpufeature.h>
|
||||
#include <linux/nospec.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/mm.h>
|
||||
|
|
@ -131,8 +132,10 @@ void noinstr __do_syscall(struct pt_regs *regs, int per_trap)
|
|||
if (unlikely(test_and_clear_pt_regs_flag(regs, PIF_SYSCALL_RET_SET)))
|
||||
goto out;
|
||||
regs->gprs[2] = -ENOSYS;
|
||||
if (likely(nr < NR_syscalls))
|
||||
if (likely(nr < NR_syscalls)) {
|
||||
nr = array_index_nospec(nr, NR_syscalls);
|
||||
regs->gprs[2] = sys_call_table[nr](regs);
|
||||
}
|
||||
out:
|
||||
syscall_exit_to_user_mode(regs);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -134,32 +134,6 @@ int dat_set_asce_limit(struct kvm_s390_mmu_cache *mc, union asce *asce, int newt
|
|||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* dat_crstep_xchg() - Exchange a gmap CRSTE with another.
|
||||
* @crstep: Pointer to the CRST entry
|
||||
* @new: Replacement entry.
|
||||
* @gfn: The affected guest address.
|
||||
* @asce: The ASCE of the address space.
|
||||
*
|
||||
* Context: This function is assumed to be called with kvm->mmu_lock held.
|
||||
*/
|
||||
void dat_crstep_xchg(union crste *crstep, union crste new, gfn_t gfn, union asce asce)
|
||||
{
|
||||
if (crstep->h.i) {
|
||||
WRITE_ONCE(*crstep, new);
|
||||
return;
|
||||
} else if (cpu_has_edat2()) {
|
||||
crdte_crste(crstep, *crstep, new, gfn, asce);
|
||||
return;
|
||||
}
|
||||
|
||||
if (machine_has_tlb_guest())
|
||||
idte_crste(crstep, gfn, IDTE_GUEST_ASCE, asce, IDTE_GLOBAL);
|
||||
else
|
||||
idte_crste(crstep, gfn, 0, NULL_ASCE, IDTE_GLOBAL);
|
||||
WRITE_ONCE(*crstep, new);
|
||||
}
|
||||
|
||||
/**
|
||||
* dat_crstep_xchg_atomic() - Atomically exchange a gmap CRSTE with another.
|
||||
* @crstep: Pointer to the CRST entry.
|
||||
|
|
@ -175,8 +149,8 @@ void dat_crstep_xchg(union crste *crstep, union crste new, gfn_t gfn, union asce
|
|||
*
|
||||
* Return: %true if the exchange was successful.
|
||||
*/
|
||||
bool dat_crstep_xchg_atomic(union crste *crstep, union crste old, union crste new, gfn_t gfn,
|
||||
union asce asce)
|
||||
bool __must_check dat_crstep_xchg_atomic(union crste *crstep, union crste old, union crste new,
|
||||
gfn_t gfn, union asce asce)
|
||||
{
|
||||
if (old.h.i)
|
||||
return arch_try_cmpxchg((long *)crstep, &old.val, new.val);
|
||||
|
|
@ -292,6 +266,7 @@ static int dat_split_ste(struct kvm_s390_mmu_cache *mc, union pmd *pmdp, gfn_t g
|
|||
pt->ptes[i].val = init.val | i * PAGE_SIZE;
|
||||
/* No need to take locks as the page table is not installed yet. */
|
||||
pgste_init.prefix_notif = old.s.fc1.prefix_notif;
|
||||
pgste_init.vsie_notif = old.s.fc1.vsie_notif;
|
||||
pgste_init.pcl = uses_skeys && init.h.i;
|
||||
dat_init_pgstes(pt, pgste_init.val);
|
||||
} else {
|
||||
|
|
@ -893,7 +868,8 @@ static long _dat_slot_crste(union crste *crstep, gfn_t gfn, gfn_t next, struct d
|
|||
|
||||
/* This table entry needs to be updated. */
|
||||
if (walk->start <= gfn && walk->end >= next) {
|
||||
dat_crstep_xchg_atomic(crstep, crste, new_crste, gfn, walk->asce);
|
||||
if (!dat_crstep_xchg_atomic(crstep, crste, new_crste, gfn, walk->asce))
|
||||
return -EINVAL;
|
||||
/* A lower level table was present, needs to be freed. */
|
||||
if (!crste.h.fc && !crste.h.i) {
|
||||
if (is_pmd(crste))
|
||||
|
|
@ -1021,67 +997,21 @@ bool dat_test_age_gfn(union asce asce, gfn_t start, gfn_t end)
|
|||
return _dat_walk_gfn_range(start, end, asce, &test_age_ops, 0, NULL) > 0;
|
||||
}
|
||||
|
||||
int dat_link(struct kvm_s390_mmu_cache *mc, union asce asce, int level,
|
||||
bool uses_skeys, struct guest_fault *f)
|
||||
{
|
||||
union crste oldval, newval;
|
||||
union pte newpte, oldpte;
|
||||
union pgste pgste;
|
||||
int rc = 0;
|
||||
|
||||
rc = dat_entry_walk(mc, f->gfn, asce, DAT_WALK_ALLOC_CONTINUE, level, &f->crstep, &f->ptep);
|
||||
if (rc == -EINVAL || rc == -ENOMEM)
|
||||
return rc;
|
||||
if (rc)
|
||||
return -EAGAIN;
|
||||
|
||||
if (WARN_ON_ONCE(unlikely(get_level(f->crstep, f->ptep) > level)))
|
||||
return -EINVAL;
|
||||
|
||||
if (f->ptep) {
|
||||
pgste = pgste_get_lock(f->ptep);
|
||||
oldpte = *f->ptep;
|
||||
newpte = _pte(f->pfn, f->writable, f->write_attempt | oldpte.s.d, !f->page);
|
||||
newpte.s.sd = oldpte.s.sd;
|
||||
oldpte.s.sd = 0;
|
||||
if (oldpte.val == _PTE_EMPTY.val || oldpte.h.pfra == f->pfn) {
|
||||
pgste = __dat_ptep_xchg(f->ptep, pgste, newpte, f->gfn, asce, uses_skeys);
|
||||
if (f->callback)
|
||||
f->callback(f);
|
||||
} else {
|
||||
rc = -EAGAIN;
|
||||
}
|
||||
pgste_set_unlock(f->ptep, pgste);
|
||||
} else {
|
||||
oldval = READ_ONCE(*f->crstep);
|
||||
newval = _crste_fc1(f->pfn, oldval.h.tt, f->writable,
|
||||
f->write_attempt | oldval.s.fc1.d);
|
||||
newval.s.fc1.sd = oldval.s.fc1.sd;
|
||||
if (oldval.val != _CRSTE_EMPTY(oldval.h.tt).val &&
|
||||
crste_origin_large(oldval) != crste_origin_large(newval))
|
||||
return -EAGAIN;
|
||||
if (!dat_crstep_xchg_atomic(f->crstep, oldval, newval, f->gfn, asce))
|
||||
return -EAGAIN;
|
||||
if (f->callback)
|
||||
f->callback(f);
|
||||
}
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
static long dat_set_pn_crste(union crste *crstep, gfn_t gfn, gfn_t next, struct dat_walk *walk)
|
||||
{
|
||||
union crste crste = READ_ONCE(*crstep);
|
||||
union crste newcrste, oldcrste;
|
||||
int *n = walk->priv;
|
||||
|
||||
if (!crste.h.fc || crste.h.i || crste.h.p)
|
||||
return 0;
|
||||
|
||||
do {
|
||||
oldcrste = READ_ONCE(*crstep);
|
||||
if (!oldcrste.h.fc || oldcrste.h.i || oldcrste.h.p)
|
||||
return 0;
|
||||
if (oldcrste.s.fc1.prefix_notif)
|
||||
break;
|
||||
newcrste = oldcrste;
|
||||
newcrste.s.fc1.prefix_notif = 1;
|
||||
} while (!dat_crstep_xchg_atomic(crstep, oldcrste, newcrste, gfn, walk->asce));
|
||||
*n = 2;
|
||||
if (crste.s.fc1.prefix_notif)
|
||||
return 0;
|
||||
crste.s.fc1.prefix_notif = 1;
|
||||
dat_crstep_xchg(crstep, crste, gfn, walk->asce);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -160,14 +160,14 @@ union pmd {
|
|||
unsigned long :44; /* HW */
|
||||
unsigned long : 3; /* Unused */
|
||||
unsigned long : 1; /* HW */
|
||||
unsigned long s : 1; /* Special */
|
||||
unsigned long w : 1; /* Writable soft-bit */
|
||||
unsigned long r : 1; /* Readable soft-bit */
|
||||
unsigned long d : 1; /* Dirty */
|
||||
unsigned long y : 1; /* Young */
|
||||
unsigned long prefix_notif : 1; /* Guest prefix invalidation notification */
|
||||
unsigned long : 3; /* HW */
|
||||
unsigned long prefix_notif : 1; /* Guest prefix invalidation notification */
|
||||
unsigned long vsie_notif : 1; /* Referenced in a shadow table */
|
||||
unsigned long : 1; /* Unused */
|
||||
unsigned long : 4; /* HW */
|
||||
unsigned long sd : 1; /* Soft-Dirty */
|
||||
unsigned long pr : 1; /* Present */
|
||||
|
|
@ -183,14 +183,14 @@ union pud {
|
|||
unsigned long :33; /* HW */
|
||||
unsigned long :14; /* Unused */
|
||||
unsigned long : 1; /* HW */
|
||||
unsigned long s : 1; /* Special */
|
||||
unsigned long w : 1; /* Writable soft-bit */
|
||||
unsigned long r : 1; /* Readable soft-bit */
|
||||
unsigned long d : 1; /* Dirty */
|
||||
unsigned long y : 1; /* Young */
|
||||
unsigned long prefix_notif : 1; /* Guest prefix invalidation notification */
|
||||
unsigned long : 3; /* HW */
|
||||
unsigned long prefix_notif : 1; /* Guest prefix invalidation notification */
|
||||
unsigned long vsie_notif : 1; /* Referenced in a shadow table */
|
||||
unsigned long : 1; /* Unused */
|
||||
unsigned long : 4; /* HW */
|
||||
unsigned long sd : 1; /* Soft-Dirty */
|
||||
unsigned long pr : 1; /* Present */
|
||||
|
|
@ -254,14 +254,14 @@ union crste {
|
|||
struct {
|
||||
unsigned long :47;
|
||||
unsigned long : 1; /* HW (should be 0) */
|
||||
unsigned long s : 1; /* Special */
|
||||
unsigned long w : 1; /* Writable */
|
||||
unsigned long r : 1; /* Readable */
|
||||
unsigned long d : 1; /* Dirty */
|
||||
unsigned long y : 1; /* Young */
|
||||
unsigned long prefix_notif : 1; /* Guest prefix invalidation notification */
|
||||
unsigned long : 3; /* HW */
|
||||
unsigned long prefix_notif : 1; /* Guest prefix invalidation notification */
|
||||
unsigned long vsie_notif : 1; /* Referenced in a shadow table */
|
||||
unsigned long : 1;
|
||||
unsigned long : 4; /* HW */
|
||||
unsigned long sd : 1; /* Soft-Dirty */
|
||||
unsigned long pr : 1; /* Present */
|
||||
|
|
@ -540,8 +540,6 @@ int dat_set_slot(struct kvm_s390_mmu_cache *mc, union asce asce, gfn_t start, gf
|
|||
u16 type, u16 param);
|
||||
int dat_set_prefix_notif_bit(union asce asce, gfn_t gfn);
|
||||
bool dat_test_age_gfn(union asce asce, gfn_t start, gfn_t end);
|
||||
int dat_link(struct kvm_s390_mmu_cache *mc, union asce asce, int level,
|
||||
bool uses_skeys, struct guest_fault *f);
|
||||
|
||||
int dat_perform_essa(union asce asce, gfn_t gfn, int orc, union essa_state *state, bool *dirty);
|
||||
long dat_reset_cmma(union asce asce, gfn_t start_gfn);
|
||||
|
|
@ -938,11 +936,14 @@ static inline bool dat_pudp_xchg_atomic(union pud *pudp, union pud old, union pu
|
|||
return dat_crstep_xchg_atomic(_CRSTEP(pudp), _CRSTE(old), _CRSTE(new), gfn, asce);
|
||||
}
|
||||
|
||||
static inline void dat_crstep_clear(union crste *crstep, gfn_t gfn, union asce asce)
|
||||
static inline union crste dat_crstep_clear_atomic(union crste *crstep, gfn_t gfn, union asce asce)
|
||||
{
|
||||
union crste newcrste = _CRSTE_EMPTY(crstep->h.tt);
|
||||
union crste oldcrste, empty = _CRSTE_EMPTY(crstep->h.tt);
|
||||
|
||||
dat_crstep_xchg(crstep, newcrste, gfn, asce);
|
||||
do {
|
||||
oldcrste = READ_ONCE(*crstep);
|
||||
} while (!dat_crstep_xchg_atomic(crstep, oldcrste, empty, gfn, asce));
|
||||
return oldcrste;
|
||||
}
|
||||
|
||||
static inline int get_level(union crste *crstep, union pte *ptep)
|
||||
|
|
|
|||
|
|
@ -1434,17 +1434,27 @@ static int _do_shadow_pte(struct gmap *sg, gpa_t raddr, union pte *ptep_h, union
|
|||
if (rc)
|
||||
return rc;
|
||||
|
||||
pgste = pgste_get_lock(ptep_h);
|
||||
newpte = _pte(f->pfn, f->writable, !p, 0);
|
||||
newpte.s.d |= ptep->s.d;
|
||||
newpte.s.sd |= ptep->s.sd;
|
||||
newpte.h.p &= ptep->h.p;
|
||||
pgste = _gmap_ptep_xchg(sg->parent, ptep_h, newpte, pgste, f->gfn, false);
|
||||
pgste.vsie_notif = 1;
|
||||
if (!pgste_get_trylock(ptep_h, &pgste))
|
||||
return -EAGAIN;
|
||||
newpte = _pte(f->pfn, f->writable, !p, ptep_h->s.s);
|
||||
newpte.s.d |= ptep_h->s.d;
|
||||
newpte.s.sd |= ptep_h->s.sd;
|
||||
newpte.h.p &= ptep_h->h.p;
|
||||
if (!newpte.h.p && !f->writable) {
|
||||
rc = -EOPNOTSUPP;
|
||||
} else {
|
||||
pgste = _gmap_ptep_xchg(sg->parent, ptep_h, newpte, pgste, f->gfn, false);
|
||||
pgste.vsie_notif = 1;
|
||||
}
|
||||
pgste_set_unlock(ptep_h, pgste);
|
||||
if (rc)
|
||||
return rc;
|
||||
if (!sg->parent)
|
||||
return -EAGAIN;
|
||||
|
||||
newpte = _pte(f->pfn, 0, !p, 0);
|
||||
pgste = pgste_get_lock(ptep);
|
||||
if (!pgste_get_trylock(ptep, &pgste))
|
||||
return -EAGAIN;
|
||||
pgste = __dat_ptep_xchg(ptep, pgste, newpte, gpa_to_gfn(raddr), sg->asce, uses_skeys(sg));
|
||||
pgste_set_unlock(ptep, pgste);
|
||||
|
||||
|
|
@ -1454,7 +1464,7 @@ static int _do_shadow_pte(struct gmap *sg, gpa_t raddr, union pte *ptep_h, union
|
|||
static int _do_shadow_crste(struct gmap *sg, gpa_t raddr, union crste *host, union crste *table,
|
||||
struct guest_fault *f, bool p)
|
||||
{
|
||||
union crste newcrste;
|
||||
union crste newcrste, oldcrste;
|
||||
gfn_t gfn;
|
||||
int rc;
|
||||
|
||||
|
|
@ -1467,16 +1477,28 @@ static int _do_shadow_crste(struct gmap *sg, gpa_t raddr, union crste *host, uni
|
|||
if (rc)
|
||||
return rc;
|
||||
|
||||
newcrste = _crste_fc1(f->pfn, host->h.tt, f->writable, !p);
|
||||
newcrste.s.fc1.d |= host->s.fc1.d;
|
||||
newcrste.s.fc1.sd |= host->s.fc1.sd;
|
||||
newcrste.h.p &= host->h.p;
|
||||
newcrste.s.fc1.vsie_notif = 1;
|
||||
newcrste.s.fc1.prefix_notif = host->s.fc1.prefix_notif;
|
||||
_gmap_crstep_xchg(sg->parent, host, newcrste, f->gfn, false);
|
||||
do {
|
||||
/* _gmap_crstep_xchg_atomic() could have unshadowed this shadow gmap */
|
||||
if (!sg->parent)
|
||||
return -EAGAIN;
|
||||
oldcrste = READ_ONCE(*host);
|
||||
newcrste = _crste_fc1(f->pfn, oldcrste.h.tt, f->writable, !p);
|
||||
newcrste.s.fc1.d |= oldcrste.s.fc1.d;
|
||||
newcrste.s.fc1.sd |= oldcrste.s.fc1.sd;
|
||||
newcrste.h.p &= oldcrste.h.p;
|
||||
newcrste.s.fc1.vsie_notif = 1;
|
||||
newcrste.s.fc1.prefix_notif = oldcrste.s.fc1.prefix_notif;
|
||||
newcrste.s.fc1.s = oldcrste.s.fc1.s;
|
||||
if (!newcrste.h.p && !f->writable)
|
||||
return -EOPNOTSUPP;
|
||||
} while (!_gmap_crstep_xchg_atomic(sg->parent, host, oldcrste, newcrste, f->gfn, false));
|
||||
if (!sg->parent)
|
||||
return -EAGAIN;
|
||||
|
||||
newcrste = _crste_fc1(f->pfn, host->h.tt, 0, !p);
|
||||
dat_crstep_xchg(table, newcrste, gpa_to_gfn(raddr), sg->asce);
|
||||
newcrste = _crste_fc1(f->pfn, oldcrste.h.tt, 0, !p);
|
||||
gfn = gpa_to_gfn(raddr);
|
||||
while (!dat_crstep_xchg_atomic(table, READ_ONCE(*table), newcrste, gfn, sg->asce))
|
||||
;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
@ -1500,21 +1522,31 @@ static int _gaccess_do_shadow(struct kvm_s390_mmu_cache *mc, struct gmap *sg,
|
|||
if (rc)
|
||||
return rc;
|
||||
|
||||
/* A race occourred. The shadow mapping is already valid, nothing to do */
|
||||
if ((ptep && !ptep->h.i) || (!ptep && crste_leaf(*table)))
|
||||
/* A race occurred. The shadow mapping is already valid, nothing to do */
|
||||
if ((ptep && !ptep->h.i && ptep->h.p == w->p) ||
|
||||
(!ptep && crste_leaf(*table) && !table->h.i && table->h.p == w->p))
|
||||
return 0;
|
||||
|
||||
gl = get_level(table, ptep);
|
||||
|
||||
/* In case of a real address space */
|
||||
if (w->level <= LEVEL_MEM) {
|
||||
l = TABLE_TYPE_PAGE_TABLE;
|
||||
hl = TABLE_TYPE_REGION1;
|
||||
goto real_address_space;
|
||||
}
|
||||
|
||||
/*
|
||||
* Skip levels that are already protected. For each level, protect
|
||||
* only the page containing the entry, not the whole table.
|
||||
*/
|
||||
for (i = gl ; i >= w->level; i--) {
|
||||
rc = gmap_protect_rmap(mc, sg, entries[i - 1].gfn, gpa_to_gfn(saddr),
|
||||
entries[i - 1].pfn, i, entries[i - 1].writable);
|
||||
rc = gmap_protect_rmap(mc, sg, entries[i].gfn, gpa_to_gfn(saddr),
|
||||
entries[i].pfn, i + 1, entries[i].writable);
|
||||
if (rc)
|
||||
return rc;
|
||||
if (!sg->parent)
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
rc = dat_entry_walk(NULL, entries[LEVEL_MEM].gfn, sg->parent->asce, DAT_WALK_LEAF,
|
||||
|
|
@ -1526,6 +1558,7 @@ static int _gaccess_do_shadow(struct kvm_s390_mmu_cache *mc, struct gmap *sg,
|
|||
/* Get the smallest granularity */
|
||||
l = min3(gl, hl, w->level);
|
||||
|
||||
real_address_space:
|
||||
flags = DAT_WALK_SPLIT_ALLOC | (uses_skeys(sg->parent) ? DAT_WALK_USES_SKEYS : 0);
|
||||
/* If necessary, create the shadow mapping */
|
||||
if (l < gl) {
|
||||
|
|
|
|||
|
|
@ -313,13 +313,16 @@ static long gmap_clear_young_crste(union crste *crstep, gfn_t gfn, gfn_t end, st
|
|||
struct clear_young_pte_priv *priv = walk->priv;
|
||||
union crste crste, new;
|
||||
|
||||
crste = READ_ONCE(*crstep);
|
||||
do {
|
||||
crste = READ_ONCE(*crstep);
|
||||
|
||||
if (!crste.h.fc)
|
||||
return 0;
|
||||
if (!crste.s.fc1.y && crste.h.i)
|
||||
return 0;
|
||||
if (crste_prefix(crste) && !gmap_mkold_prefix(priv->gmap, gfn, end))
|
||||
break;
|
||||
|
||||
if (!crste.h.fc)
|
||||
return 0;
|
||||
if (!crste.s.fc1.y && crste.h.i)
|
||||
return 0;
|
||||
if (!crste_prefix(crste) || gmap_mkold_prefix(priv->gmap, gfn, end)) {
|
||||
new = crste;
|
||||
new.h.i = 1;
|
||||
new.s.fc1.y = 0;
|
||||
|
|
@ -328,8 +331,8 @@ static long gmap_clear_young_crste(union crste *crstep, gfn_t gfn, gfn_t end, st
|
|||
folio_set_dirty(phys_to_folio(crste_origin_large(crste)));
|
||||
new.s.fc1.d = 0;
|
||||
new.h.p = 1;
|
||||
dat_crstep_xchg(crstep, new, gfn, walk->asce);
|
||||
}
|
||||
} while (!dat_crstep_xchg_atomic(crstep, crste, new, gfn, walk->asce));
|
||||
|
||||
priv->young = 1;
|
||||
return 0;
|
||||
}
|
||||
|
|
@ -391,14 +394,18 @@ static long _gmap_unmap_crste(union crste *crstep, gfn_t gfn, gfn_t next, struct
|
|||
{
|
||||
struct gmap_unmap_priv *priv = walk->priv;
|
||||
struct folio *folio = NULL;
|
||||
union crste old = *crstep;
|
||||
|
||||
if (crstep->h.fc) {
|
||||
if (crstep->s.fc1.pr && test_bit(GMAP_FLAG_EXPORT_ON_UNMAP, &priv->gmap->flags))
|
||||
folio = phys_to_folio(crste_origin_large(*crstep));
|
||||
gmap_crstep_xchg(priv->gmap, crstep, _CRSTE_EMPTY(crstep->h.tt), gfn);
|
||||
if (folio)
|
||||
uv_convert_from_secure_folio(folio);
|
||||
}
|
||||
if (!old.h.fc)
|
||||
return 0;
|
||||
|
||||
if (old.s.fc1.pr && test_bit(GMAP_FLAG_EXPORT_ON_UNMAP, &priv->gmap->flags))
|
||||
folio = phys_to_folio(crste_origin_large(old));
|
||||
/* No races should happen because kvm->mmu_lock is held in write mode */
|
||||
KVM_BUG_ON(!gmap_crstep_xchg_atomic(priv->gmap, crstep, old, _CRSTE_EMPTY(old.h.tt), gfn),
|
||||
priv->gmap->kvm);
|
||||
if (folio)
|
||||
uv_convert_from_secure_folio(folio);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
@ -474,23 +481,24 @@ static long _crste_test_and_clear_softdirty(union crste *table, gfn_t gfn, gfn_t
|
|||
|
||||
if (fatal_signal_pending(current))
|
||||
return 1;
|
||||
crste = READ_ONCE(*table);
|
||||
if (!crste.h.fc)
|
||||
return 0;
|
||||
if (crste.h.p && !crste.s.fc1.sd)
|
||||
return 0;
|
||||
do {
|
||||
crste = READ_ONCE(*table);
|
||||
if (!crste.h.fc)
|
||||
return 0;
|
||||
if (crste.h.p && !crste.s.fc1.sd)
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* If this large page contains one or more prefixes of vCPUs that are
|
||||
* currently running, do not reset the protection, leave it marked as
|
||||
* dirty.
|
||||
*/
|
||||
if (!crste.s.fc1.prefix_notif || gmap_mkold_prefix(gmap, gfn, end)) {
|
||||
/*
|
||||
* If this large page contains one or more prefixes of vCPUs that are
|
||||
* currently running, do not reset the protection, leave it marked as
|
||||
* dirty.
|
||||
*/
|
||||
if (crste.s.fc1.prefix_notif && !gmap_mkold_prefix(gmap, gfn, end))
|
||||
break;
|
||||
new = crste;
|
||||
new.h.p = 1;
|
||||
new.s.fc1.sd = 0;
|
||||
gmap_crstep_xchg(gmap, table, new, gfn);
|
||||
}
|
||||
} while (!gmap_crstep_xchg_atomic(gmap, table, crste, new, gfn));
|
||||
|
||||
for ( ; gfn < end; gfn++)
|
||||
mark_page_dirty(gmap->kvm, gfn);
|
||||
|
|
@ -511,7 +519,7 @@ void gmap_sync_dirty_log(struct gmap *gmap, gfn_t start, gfn_t end)
|
|||
_dat_walk_gfn_range(start, end, gmap->asce, &walk_ops, 0, gmap);
|
||||
}
|
||||
|
||||
static int gmap_handle_minor_crste_fault(union asce asce, struct guest_fault *f)
|
||||
static int gmap_handle_minor_crste_fault(struct gmap *gmap, struct guest_fault *f)
|
||||
{
|
||||
union crste newcrste, oldcrste = READ_ONCE(*f->crstep);
|
||||
|
||||
|
|
@ -536,10 +544,8 @@ static int gmap_handle_minor_crste_fault(union asce asce, struct guest_fault *f)
|
|||
newcrste.s.fc1.d = 1;
|
||||
newcrste.s.fc1.sd = 1;
|
||||
}
|
||||
if (!oldcrste.s.fc1.d && newcrste.s.fc1.d)
|
||||
SetPageDirty(phys_to_page(crste_origin_large(newcrste)));
|
||||
/* In case of races, let the slow path deal with it. */
|
||||
return !dat_crstep_xchg_atomic(f->crstep, oldcrste, newcrste, f->gfn, asce);
|
||||
return !gmap_crstep_xchg_atomic(gmap, f->crstep, oldcrste, newcrste, f->gfn);
|
||||
}
|
||||
/* Trying to write on a read-only page, let the slow path deal with it. */
|
||||
return 1;
|
||||
|
|
@ -568,8 +574,6 @@ static int _gmap_handle_minor_pte_fault(struct gmap *gmap, union pgste *pgste,
|
|||
newpte.s.d = 1;
|
||||
newpte.s.sd = 1;
|
||||
}
|
||||
if (!oldpte.s.d && newpte.s.d)
|
||||
SetPageDirty(pfn_to_page(newpte.h.pfra));
|
||||
*pgste = gmap_ptep_xchg(gmap, f->ptep, newpte, *pgste, f->gfn);
|
||||
|
||||
return 0;
|
||||
|
|
@ -606,7 +610,7 @@ int gmap_try_fixup_minor(struct gmap *gmap, struct guest_fault *fault)
|
|||
fault->callback(fault);
|
||||
pgste_set_unlock(fault->ptep, pgste);
|
||||
} else {
|
||||
rc = gmap_handle_minor_crste_fault(gmap->asce, fault);
|
||||
rc = gmap_handle_minor_crste_fault(gmap, fault);
|
||||
if (!rc && fault->callback)
|
||||
fault->callback(fault);
|
||||
}
|
||||
|
|
@ -623,10 +627,61 @@ static inline bool gmap_1m_allowed(struct gmap *gmap, gfn_t gfn)
|
|||
return test_bit(GMAP_FLAG_ALLOW_HPAGE_1M, &gmap->flags);
|
||||
}
|
||||
|
||||
static int _gmap_link(struct kvm_s390_mmu_cache *mc, struct gmap *gmap, int level,
|
||||
struct guest_fault *f)
|
||||
{
|
||||
union crste oldval, newval;
|
||||
union pte newpte, oldpte;
|
||||
union pgste pgste;
|
||||
int rc = 0;
|
||||
|
||||
rc = dat_entry_walk(mc, f->gfn, gmap->asce, DAT_WALK_ALLOC_CONTINUE, level,
|
||||
&f->crstep, &f->ptep);
|
||||
if (rc == -ENOMEM)
|
||||
return rc;
|
||||
if (KVM_BUG_ON(rc == -EINVAL, gmap->kvm))
|
||||
return rc;
|
||||
if (rc)
|
||||
return -EAGAIN;
|
||||
if (KVM_BUG_ON(get_level(f->crstep, f->ptep) > level, gmap->kvm))
|
||||
return -EINVAL;
|
||||
|
||||
if (f->ptep) {
|
||||
pgste = pgste_get_lock(f->ptep);
|
||||
oldpte = *f->ptep;
|
||||
newpte = _pte(f->pfn, f->writable, f->write_attempt | oldpte.s.d, !f->page);
|
||||
newpte.s.sd = oldpte.s.sd;
|
||||
oldpte.s.sd = 0;
|
||||
if (oldpte.val == _PTE_EMPTY.val || oldpte.h.pfra == f->pfn) {
|
||||
pgste = gmap_ptep_xchg(gmap, f->ptep, newpte, pgste, f->gfn);
|
||||
if (f->callback)
|
||||
f->callback(f);
|
||||
} else {
|
||||
rc = -EAGAIN;
|
||||
}
|
||||
pgste_set_unlock(f->ptep, pgste);
|
||||
} else {
|
||||
do {
|
||||
oldval = READ_ONCE(*f->crstep);
|
||||
newval = _crste_fc1(f->pfn, oldval.h.tt, f->writable,
|
||||
f->write_attempt | oldval.s.fc1.d);
|
||||
newval.s.fc1.s = !f->page;
|
||||
newval.s.fc1.sd = oldval.s.fc1.sd;
|
||||
if (oldval.val != _CRSTE_EMPTY(oldval.h.tt).val &&
|
||||
crste_origin_large(oldval) != crste_origin_large(newval))
|
||||
return -EAGAIN;
|
||||
} while (!gmap_crstep_xchg_atomic(gmap, f->crstep, oldval, newval, f->gfn));
|
||||
if (f->callback)
|
||||
f->callback(f);
|
||||
}
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
int gmap_link(struct kvm_s390_mmu_cache *mc, struct gmap *gmap, struct guest_fault *f)
|
||||
{
|
||||
unsigned int order;
|
||||
int rc, level;
|
||||
int level;
|
||||
|
||||
lockdep_assert_held(&gmap->kvm->mmu_lock);
|
||||
|
||||
|
|
@ -638,16 +693,14 @@ int gmap_link(struct kvm_s390_mmu_cache *mc, struct gmap *gmap, struct guest_fau
|
|||
else if (order >= get_order(_SEGMENT_SIZE) && gmap_1m_allowed(gmap, f->gfn))
|
||||
level = TABLE_TYPE_SEGMENT;
|
||||
}
|
||||
rc = dat_link(mc, gmap->asce, level, uses_skeys(gmap), f);
|
||||
KVM_BUG_ON(rc == -EINVAL, gmap->kvm);
|
||||
return rc;
|
||||
return _gmap_link(mc, gmap, level, f);
|
||||
}
|
||||
|
||||
static int gmap_ucas_map_one(struct kvm_s390_mmu_cache *mc, struct gmap *gmap,
|
||||
gfn_t p_gfn, gfn_t c_gfn, bool force_alloc)
|
||||
{
|
||||
union crste newcrste, oldcrste;
|
||||
struct page_table *pt;
|
||||
union crste newcrste;
|
||||
union crste *crstep;
|
||||
union pte *ptep;
|
||||
int rc;
|
||||
|
|
@ -673,7 +726,11 @@ static int gmap_ucas_map_one(struct kvm_s390_mmu_cache *mc, struct gmap *gmap,
|
|||
&crstep, &ptep);
|
||||
if (rc)
|
||||
return rc;
|
||||
dat_crstep_xchg(crstep, newcrste, c_gfn, gmap->asce);
|
||||
do {
|
||||
oldcrste = READ_ONCE(*crstep);
|
||||
if (oldcrste.val == newcrste.val)
|
||||
break;
|
||||
} while (!dat_crstep_xchg_atomic(crstep, oldcrste, newcrste, c_gfn, gmap->asce));
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
@ -777,8 +834,10 @@ static void gmap_ucas_unmap_one(struct gmap *gmap, gfn_t c_gfn)
|
|||
int rc;
|
||||
|
||||
rc = dat_entry_walk(NULL, c_gfn, gmap->asce, 0, TABLE_TYPE_SEGMENT, &crstep, &ptep);
|
||||
if (!rc)
|
||||
dat_crstep_xchg(crstep, _PMD_EMPTY, c_gfn, gmap->asce);
|
||||
if (rc)
|
||||
return;
|
||||
while (!dat_crstep_xchg_atomic(crstep, READ_ONCE(*crstep), _PMD_EMPTY, c_gfn, gmap->asce))
|
||||
;
|
||||
}
|
||||
|
||||
void gmap_ucas_unmap(struct gmap *gmap, gfn_t c_gfn, unsigned long count)
|
||||
|
|
@ -1017,8 +1076,8 @@ static void gmap_unshadow_level(struct gmap *sg, gfn_t r_gfn, int level)
|
|||
dat_ptep_xchg(ptep, _PTE_EMPTY, r_gfn, sg->asce, uses_skeys(sg));
|
||||
return;
|
||||
}
|
||||
crste = READ_ONCE(*crstep);
|
||||
dat_crstep_clear(crstep, r_gfn, sg->asce);
|
||||
|
||||
crste = dat_crstep_clear_atomic(crstep, r_gfn, sg->asce);
|
||||
if (crste_leaf(crste) || crste.h.i)
|
||||
return;
|
||||
if (is_pmd(crste))
|
||||
|
|
@ -1101,6 +1160,7 @@ struct gmap_protect_asce_top_level {
|
|||
static inline int __gmap_protect_asce_top_level(struct kvm_s390_mmu_cache *mc, struct gmap *sg,
|
||||
struct gmap_protect_asce_top_level *context)
|
||||
{
|
||||
struct gmap *parent;
|
||||
int rc, i;
|
||||
|
||||
guard(write_lock)(&sg->kvm->mmu_lock);
|
||||
|
|
@ -1108,7 +1168,12 @@ static inline int __gmap_protect_asce_top_level(struct kvm_s390_mmu_cache *mc, s
|
|||
if (kvm_s390_array_needs_retry_safe(sg->kvm, context->seq, context->f))
|
||||
return -EAGAIN;
|
||||
|
||||
scoped_guard(spinlock, &sg->parent->children_lock) {
|
||||
parent = READ_ONCE(sg->parent);
|
||||
if (!parent)
|
||||
return -EAGAIN;
|
||||
scoped_guard(spinlock, &parent->children_lock) {
|
||||
if (READ_ONCE(sg->parent) != parent)
|
||||
return -EAGAIN;
|
||||
for (i = 0; i < CRST_TABLE_PAGES; i++) {
|
||||
if (!context->f[i].valid)
|
||||
continue;
|
||||
|
|
@ -1191,6 +1256,9 @@ struct gmap *gmap_create_shadow(struct kvm_s390_mmu_cache *mc, struct gmap *pare
|
|||
struct gmap *sg, *new;
|
||||
int rc;
|
||||
|
||||
if (WARN_ON(!parent))
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
scoped_guard(spinlock, &parent->children_lock) {
|
||||
sg = gmap_find_shadow(parent, asce, edat_level);
|
||||
if (sg) {
|
||||
|
|
|
|||
|
|
@ -185,6 +185,8 @@ static inline union pgste _gmap_ptep_xchg(struct gmap *gmap, union pte *ptep, un
|
|||
else
|
||||
_gmap_handle_vsie_unshadow_event(gmap, gfn);
|
||||
}
|
||||
if (!ptep->s.d && newpte.s.d && !newpte.s.s)
|
||||
SetPageDirty(pfn_to_page(newpte.h.pfra));
|
||||
return __dat_ptep_xchg(ptep, pgste, newpte, gfn, gmap->asce, uses_skeys(gmap));
|
||||
}
|
||||
|
||||
|
|
@ -194,35 +196,42 @@ static inline union pgste gmap_ptep_xchg(struct gmap *gmap, union pte *ptep, uni
|
|||
return _gmap_ptep_xchg(gmap, ptep, newpte, pgste, gfn, true);
|
||||
}
|
||||
|
||||
static inline void _gmap_crstep_xchg(struct gmap *gmap, union crste *crstep, union crste ne,
|
||||
gfn_t gfn, bool needs_lock)
|
||||
static inline bool __must_check _gmap_crstep_xchg_atomic(struct gmap *gmap, union crste *crstep,
|
||||
union crste oldcrste, union crste newcrste,
|
||||
gfn_t gfn, bool needs_lock)
|
||||
{
|
||||
unsigned long align = 8 + (is_pmd(*crstep) ? 0 : 11);
|
||||
unsigned long align = is_pmd(newcrste) ? _PAGE_ENTRIES : _PAGE_ENTRIES * _CRST_ENTRIES;
|
||||
|
||||
if (KVM_BUG_ON(crstep->h.tt != oldcrste.h.tt || newcrste.h.tt != oldcrste.h.tt, gmap->kvm))
|
||||
return true;
|
||||
|
||||
lockdep_assert_held(&gmap->kvm->mmu_lock);
|
||||
if (!needs_lock)
|
||||
lockdep_assert_held(&gmap->children_lock);
|
||||
|
||||
gfn = ALIGN_DOWN(gfn, align);
|
||||
if (crste_prefix(*crstep) && (ne.h.p || ne.h.i || !crste_prefix(ne))) {
|
||||
ne.s.fc1.prefix_notif = 0;
|
||||
if (crste_prefix(oldcrste) && (newcrste.h.p || newcrste.h.i || !crste_prefix(newcrste))) {
|
||||
newcrste.s.fc1.prefix_notif = 0;
|
||||
gmap_unmap_prefix(gmap, gfn, gfn + align);
|
||||
}
|
||||
if (crste_leaf(*crstep) && crstep->s.fc1.vsie_notif &&
|
||||
(ne.h.p || ne.h.i || !ne.s.fc1.vsie_notif)) {
|
||||
ne.s.fc1.vsie_notif = 0;
|
||||
if (crste_leaf(oldcrste) && oldcrste.s.fc1.vsie_notif &&
|
||||
(newcrste.h.p || newcrste.h.i || !newcrste.s.fc1.vsie_notif)) {
|
||||
newcrste.s.fc1.vsie_notif = 0;
|
||||
if (needs_lock)
|
||||
gmap_handle_vsie_unshadow_event(gmap, gfn);
|
||||
else
|
||||
_gmap_handle_vsie_unshadow_event(gmap, gfn);
|
||||
}
|
||||
dat_crstep_xchg(crstep, ne, gfn, gmap->asce);
|
||||
if (!oldcrste.s.fc1.d && newcrste.s.fc1.d && !newcrste.s.fc1.s)
|
||||
SetPageDirty(phys_to_page(crste_origin_large(newcrste)));
|
||||
return dat_crstep_xchg_atomic(crstep, oldcrste, newcrste, gfn, gmap->asce);
|
||||
}
|
||||
|
||||
static inline void gmap_crstep_xchg(struct gmap *gmap, union crste *crstep, union crste ne,
|
||||
gfn_t gfn)
|
||||
static inline bool __must_check gmap_crstep_xchg_atomic(struct gmap *gmap, union crste *crstep,
|
||||
union crste oldcrste, union crste newcrste,
|
||||
gfn_t gfn)
|
||||
{
|
||||
return _gmap_crstep_xchg(gmap, crstep, ne, gfn, true);
|
||||
return _gmap_crstep_xchg_atomic(gmap, crstep, oldcrste, newcrste, gfn, true);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
|||
|
|
@ -2724,6 +2724,9 @@ static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
|
|||
|
||||
bit = bit_nr + (addr % PAGE_SIZE) * 8;
|
||||
|
||||
/* kvm_set_routing_entry() should never allow this to happen */
|
||||
WARN_ON_ONCE(bit > (PAGE_SIZE * BITS_PER_BYTE - 1));
|
||||
|
||||
return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
|
||||
}
|
||||
|
||||
|
|
@ -2824,6 +2827,12 @@ void kvm_s390_reinject_machine_check(struct kvm_vcpu *vcpu,
|
|||
int rc;
|
||||
|
||||
mci.val = mcck_info->mcic;
|
||||
|
||||
/* log machine checks being reinjected on all debugs */
|
||||
VCPU_EVENT(vcpu, 2, "guest machine check %lx", mci.val);
|
||||
KVM_EVENT(2, "guest machine check %lx", mci.val);
|
||||
pr_info("guest machine check pid %d: %lx", current->pid, mci.val);
|
||||
|
||||
if (mci.sr)
|
||||
cr14 |= CR14_RECOVERY_SUBMASK;
|
||||
if (mci.dg)
|
||||
|
|
@ -2852,6 +2861,7 @@ int kvm_set_routing_entry(struct kvm *kvm,
|
|||
struct kvm_kernel_irq_routing_entry *e,
|
||||
const struct kvm_irq_routing_entry *ue)
|
||||
{
|
||||
const struct kvm_irq_routing_s390_adapter *adapter;
|
||||
u64 uaddr_s, uaddr_i;
|
||||
int idx;
|
||||
|
||||
|
|
@ -2862,6 +2872,14 @@ int kvm_set_routing_entry(struct kvm *kvm,
|
|||
return -EINVAL;
|
||||
e->set = set_adapter_int;
|
||||
|
||||
adapter = &ue->u.adapter;
|
||||
if (adapter->summary_addr + (adapter->summary_offset / 8) >=
|
||||
(adapter->summary_addr & PAGE_MASK) + PAGE_SIZE)
|
||||
return -EINVAL;
|
||||
if (adapter->ind_addr + (adapter->ind_offset / 8) >=
|
||||
(adapter->ind_addr & PAGE_MASK) + PAGE_SIZE)
|
||||
return -EINVAL;
|
||||
|
||||
idx = srcu_read_lock(&kvm->srcu);
|
||||
uaddr_s = gpa_to_hva(kvm, ue->u.adapter.summary_addr);
|
||||
uaddr_i = gpa_to_hva(kvm, ue->u.adapter.ind_addr);
|
||||
|
|
|
|||
|
|
@ -4617,7 +4617,7 @@ static int vcpu_post_run_handle_fault(struct kvm_vcpu *vcpu)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
|
||||
static int vcpu_post_run(struct kvm_vcpu *vcpu, int sie_return)
|
||||
{
|
||||
struct mcck_volatile_info *mcck_info;
|
||||
struct sie_page *sie_page;
|
||||
|
|
@ -4633,14 +4633,14 @@ static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
|
|||
vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
|
||||
vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
|
||||
|
||||
if (exit_reason == -EINTR) {
|
||||
VCPU_EVENT(vcpu, 3, "%s", "machine check");
|
||||
if (sie_return == SIE64_RETURN_MCCK) {
|
||||
sie_page = container_of(vcpu->arch.sie_block,
|
||||
struct sie_page, sie_block);
|
||||
mcck_info = &sie_page->mcck_info;
|
||||
kvm_s390_reinject_machine_check(vcpu, mcck_info);
|
||||
return 0;
|
||||
}
|
||||
WARN_ON_ONCE(sie_return != SIE64_RETURN_NORMAL);
|
||||
|
||||
if (vcpu->arch.sie_block->icptcode > 0) {
|
||||
rc = kvm_handle_sie_intercept(vcpu);
|
||||
|
|
@ -4679,7 +4679,7 @@ int noinstr kvm_s390_enter_exit_sie(struct kvm_s390_sie_block *scb,
|
|||
#define PSW_INT_MASK (PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_MCHECK)
|
||||
static int __vcpu_run(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
int rc, exit_reason;
|
||||
int rc, sie_return;
|
||||
struct sie_page *sie_page = (struct sie_page *)vcpu->arch.sie_block;
|
||||
|
||||
/*
|
||||
|
|
@ -4719,9 +4719,9 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
|
|||
guest_timing_enter_irqoff();
|
||||
__disable_cpu_timer_accounting(vcpu);
|
||||
|
||||
exit_reason = kvm_s390_enter_exit_sie(vcpu->arch.sie_block,
|
||||
vcpu->run->s.regs.gprs,
|
||||
vcpu->arch.gmap->asce.val);
|
||||
sie_return = kvm_s390_enter_exit_sie(vcpu->arch.sie_block,
|
||||
vcpu->run->s.regs.gprs,
|
||||
vcpu->arch.gmap->asce.val);
|
||||
|
||||
__enable_cpu_timer_accounting(vcpu);
|
||||
guest_timing_exit_irqoff();
|
||||
|
|
@ -4744,7 +4744,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
|
|||
}
|
||||
kvm_vcpu_srcu_read_lock(vcpu);
|
||||
|
||||
rc = vcpu_post_run(vcpu, exit_reason);
|
||||
rc = vcpu_post_run(vcpu, sie_return);
|
||||
if (rc || guestdbg_exit_pending(vcpu)) {
|
||||
kvm_vcpu_srcu_read_unlock(vcpu);
|
||||
break;
|
||||
|
|
@ -5520,9 +5520,21 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
|
|||
}
|
||||
#endif
|
||||
case KVM_S390_VCPU_FAULT: {
|
||||
idx = srcu_read_lock(&vcpu->kvm->srcu);
|
||||
r = vcpu_dat_fault_handler(vcpu, arg, 0);
|
||||
srcu_read_unlock(&vcpu->kvm->srcu, idx);
|
||||
gpa_t gaddr = arg;
|
||||
|
||||
scoped_guard(srcu, &vcpu->kvm->srcu) {
|
||||
r = vcpu_ucontrol_translate(vcpu, &gaddr);
|
||||
if (r)
|
||||
break;
|
||||
|
||||
r = kvm_s390_faultin_gfn_simple(vcpu, NULL, gpa_to_gfn(gaddr), false);
|
||||
if (r == PGM_ADDRESSING)
|
||||
r = -EFAULT;
|
||||
if (r <= 0)
|
||||
break;
|
||||
r = -EIO;
|
||||
KVM_BUG_ON(r, vcpu->kvm);
|
||||
}
|
||||
break;
|
||||
}
|
||||
case KVM_ENABLE_CAP:
|
||||
|
|
|
|||
|
|
@ -1122,6 +1122,7 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page, struc
|
|||
{
|
||||
struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
|
||||
struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
|
||||
unsigned long sie_return = SIE64_RETURN_NORMAL;
|
||||
int guest_bp_isolation;
|
||||
int rc = 0;
|
||||
|
||||
|
|
@ -1163,7 +1164,7 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page, struc
|
|||
goto xfer_to_guest_mode_check;
|
||||
}
|
||||
guest_timing_enter_irqoff();
|
||||
rc = kvm_s390_enter_exit_sie(scb_s, vcpu->run->s.regs.gprs, sg->asce.val);
|
||||
sie_return = kvm_s390_enter_exit_sie(scb_s, vcpu->run->s.regs.gprs, sg->asce.val);
|
||||
guest_timing_exit_irqoff();
|
||||
local_irq_enable();
|
||||
}
|
||||
|
|
@ -1178,12 +1179,13 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page, struc
|
|||
|
||||
kvm_vcpu_srcu_read_lock(vcpu);
|
||||
|
||||
if (rc == -EINTR) {
|
||||
VCPU_EVENT(vcpu, 3, "%s", "machine check");
|
||||
if (sie_return == SIE64_RETURN_MCCK) {
|
||||
kvm_s390_reinject_machine_check(vcpu, &vsie_page->mcck_info);
|
||||
return 0;
|
||||
}
|
||||
|
||||
WARN_ON_ONCE(sie_return != SIE64_RETURN_NORMAL);
|
||||
|
||||
if (rc > 0)
|
||||
rc = 0; /* we could still have an icpt */
|
||||
else if (current->thread.gmap_int_code)
|
||||
|
|
@ -1326,7 +1328,7 @@ static void unregister_shadow_scb(struct kvm_vcpu *vcpu)
|
|||
static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
|
||||
{
|
||||
struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
|
||||
struct gmap *sg;
|
||||
struct gmap *sg = NULL;
|
||||
int rc = 0;
|
||||
|
||||
while (1) {
|
||||
|
|
@ -1366,6 +1368,8 @@ static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
|
|||
sg = gmap_put(sg);
|
||||
cond_resched();
|
||||
}
|
||||
if (sg)
|
||||
sg = gmap_put(sg);
|
||||
|
||||
if (rc == -EFAULT) {
|
||||
/*
|
||||
|
|
|
|||
|
|
@ -441,10 +441,17 @@ void do_secure_storage_access(struct pt_regs *regs)
|
|||
folio = phys_to_folio(addr);
|
||||
if (unlikely(!folio_try_get(folio)))
|
||||
return;
|
||||
rc = arch_make_folio_accessible(folio);
|
||||
rc = uv_convert_from_secure(folio_to_phys(folio));
|
||||
if (!rc)
|
||||
clear_bit(PG_arch_1, &folio->flags.f);
|
||||
folio_put(folio);
|
||||
/*
|
||||
* There are some valid fixup types for kernel
|
||||
* accesses to donated secure memory. zeropad is one
|
||||
* of them.
|
||||
*/
|
||||
if (rc)
|
||||
BUG();
|
||||
return handle_fault_error_nolock(regs, 0);
|
||||
} else {
|
||||
if (faulthandler_disabled())
|
||||
return handle_fault_error_nolock(regs, 0);
|
||||
|
|
|
|||
|
|
@ -121,6 +121,9 @@ noinstr struct ghcb *__sev_get_ghcb(struct ghcb_state *state)
|
|||
|
||||
WARN_ON(!irqs_disabled());
|
||||
|
||||
if (!sev_cfg.ghcbs_initialized)
|
||||
return boot_ghcb;
|
||||
|
||||
data = this_cpu_read(runtime_data);
|
||||
ghcb = &data->ghcb_page;
|
||||
|
||||
|
|
@ -164,6 +167,9 @@ noinstr void __sev_put_ghcb(struct ghcb_state *state)
|
|||
|
||||
WARN_ON(!irqs_disabled());
|
||||
|
||||
if (!sev_cfg.ghcbs_initialized)
|
||||
return;
|
||||
|
||||
data = this_cpu_read(runtime_data);
|
||||
ghcb = &data->ghcb_page;
|
||||
|
||||
|
|
|
|||
|
|
@ -177,6 +177,16 @@ static noinstr void fred_extint(struct pt_regs *regs)
|
|||
}
|
||||
}
|
||||
|
||||
#ifdef CONFIG_AMD_MEM_ENCRYPT
|
||||
noinstr void exc_vmm_communication(struct pt_regs *regs, unsigned long error_code)
|
||||
{
|
||||
if (user_mode(regs))
|
||||
return user_exc_vmm_communication(regs, error_code);
|
||||
else
|
||||
return kernel_exc_vmm_communication(regs, error_code);
|
||||
}
|
||||
#endif
|
||||
|
||||
static noinstr void fred_hwexc(struct pt_regs *regs, unsigned long error_code)
|
||||
{
|
||||
/* Optimize for #PF. That's the only exception which matters performance wise */
|
||||
|
|
@ -207,6 +217,10 @@ static noinstr void fred_hwexc(struct pt_regs *regs, unsigned long error_code)
|
|||
#ifdef CONFIG_X86_CET
|
||||
case X86_TRAP_CP: return exc_control_protection(regs, error_code);
|
||||
#endif
|
||||
#ifdef CONFIG_AMD_MEM_ENCRYPT
|
||||
case X86_TRAP_VC: return exc_vmm_communication(regs, error_code);
|
||||
#endif
|
||||
|
||||
default: return fred_bad_type(regs, error_code);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -4855,8 +4855,10 @@ static int intel_pmu_hw_config(struct perf_event *event)
|
|||
intel_pmu_set_acr_caused_constr(leader, idx++, cause_mask);
|
||||
|
||||
if (leader->nr_siblings) {
|
||||
for_each_sibling_event(sibling, leader)
|
||||
intel_pmu_set_acr_caused_constr(sibling, idx++, cause_mask);
|
||||
for_each_sibling_event(sibling, leader) {
|
||||
if (is_x86_event(sibling))
|
||||
intel_pmu_set_acr_caused_constr(sibling, idx++, cause_mask);
|
||||
}
|
||||
}
|
||||
|
||||
if (leader != event)
|
||||
|
|
|
|||
|
|
@ -44,6 +44,20 @@ KCOV_INSTRUMENT_unwind_orc.o := n
|
|||
KCOV_INSTRUMENT_unwind_frame.o := n
|
||||
KCOV_INSTRUMENT_unwind_guess.o := n
|
||||
|
||||
# Disable KCOV to prevent crashes during kexec: load_segments() invalidates
|
||||
# the GS base, which KCOV relies on for per-CPU data.
|
||||
#
|
||||
# As KCOV and KEXEC compatibility should be preserved (e.g. syzkaller is
|
||||
# using it to collect crash dumps during kernel fuzzing), disabling
|
||||
# KCOV for KEXEC kernels is not an option. Selectively disabling KCOV
|
||||
# instrumentation for individual affected functions can be fragile, while
|
||||
# adding more checks to KCOV would slow it down.
|
||||
#
|
||||
# As a compromise solution, disable KCOV instrumentation for the whole
|
||||
# source code file. If its coverage is ever needed, other approaches
|
||||
# should be considered.
|
||||
KCOV_INSTRUMENT_machine_kexec_64.o := n
|
||||
|
||||
CFLAGS_head32.o := -fno-stack-protector
|
||||
CFLAGS_head64.o := -fno-stack-protector
|
||||
CFLAGS_irq.o := -I $(src)/../include/asm/trace
|
||||
|
|
|
|||
|
|
@ -433,7 +433,20 @@ static __always_inline void setup_lass(struct cpuinfo_x86 *c)
|
|||
|
||||
/* These bits should not change their value after CPU init is finished. */
|
||||
static const unsigned long cr4_pinned_mask = X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP |
|
||||
X86_CR4_FSGSBASE | X86_CR4_CET | X86_CR4_FRED;
|
||||
X86_CR4_FSGSBASE | X86_CR4_CET;
|
||||
|
||||
/*
|
||||
* The CR pinning protects against ROP on the 'mov %reg, %CRn' instruction(s).
|
||||
* Since you can ROP directly to these instructions (barring shadow stack),
|
||||
* any protection must follow immediately and unconditionally after that.
|
||||
*
|
||||
* Specifically, the CR[04] write functions below will have the value
|
||||
* validation controlled by the @cr_pinning static_branch which is
|
||||
* __ro_after_init, just like the cr4_pinned_bits value.
|
||||
*
|
||||
* Once set, an attacker will have to defeat page-tables to get around these
|
||||
* restrictions. Which is a much bigger ask than 'simple' ROP.
|
||||
*/
|
||||
static DEFINE_STATIC_KEY_FALSE_RO(cr_pinning);
|
||||
static unsigned long cr4_pinned_bits __ro_after_init;
|
||||
|
||||
|
|
@ -2050,12 +2063,6 @@ static void identify_cpu(struct cpuinfo_x86 *c)
|
|||
setup_umip(c);
|
||||
setup_lass(c);
|
||||
|
||||
/* Enable FSGSBASE instructions if available. */
|
||||
if (cpu_has(c, X86_FEATURE_FSGSBASE)) {
|
||||
cr4_set_bits(X86_CR4_FSGSBASE);
|
||||
elf_hwcap2 |= HWCAP2_FSGSBASE;
|
||||
}
|
||||
|
||||
/*
|
||||
* The vendor-specific functions might have changed features.
|
||||
* Now we do "generic changes."
|
||||
|
|
@ -2416,6 +2423,18 @@ void cpu_init_exception_handling(bool boot_cpu)
|
|||
/* GHCB needs to be setup to handle #VC. */
|
||||
setup_ghcb();
|
||||
|
||||
/*
|
||||
* On CPUs with FSGSBASE support, paranoid_entry() uses
|
||||
* ALTERNATIVE-patched RDGSBASE/WRGSBASE instructions. Secondary CPUs
|
||||
* boot after alternatives are patched globally, so early exceptions
|
||||
* execute patched code that depends on FSGSBASE. Enable the feature
|
||||
* before any exceptions occur.
|
||||
*/
|
||||
if (cpu_feature_enabled(X86_FEATURE_FSGSBASE)) {
|
||||
cr4_set_bits(X86_CR4_FSGSBASE);
|
||||
elf_hwcap2 |= HWCAP2_FSGSBASE;
|
||||
}
|
||||
|
||||
if (cpu_feature_enabled(X86_FEATURE_FRED)) {
|
||||
/* The boot CPU has enabled FRED during early boot */
|
||||
if (!boot_cpu)
|
||||
|
|
|
|||
|
|
@ -3044,12 +3044,6 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
|
|||
bool prefetch = !fault || fault->prefetch;
|
||||
bool write_fault = fault && fault->write;
|
||||
|
||||
if (unlikely(is_noslot_pfn(pfn))) {
|
||||
vcpu->stat.pf_mmio_spte_created++;
|
||||
mark_mmio_spte(vcpu, sptep, gfn, pte_access);
|
||||
return RET_PF_EMULATE;
|
||||
}
|
||||
|
||||
if (is_shadow_present_pte(*sptep)) {
|
||||
if (prefetch && is_last_spte(*sptep, level) &&
|
||||
pfn == spte_to_pfn(*sptep))
|
||||
|
|
@ -3066,13 +3060,22 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
|
|||
child = spte_to_child_sp(pte);
|
||||
drop_parent_pte(vcpu->kvm, child, sptep);
|
||||
flush = true;
|
||||
} else if (WARN_ON_ONCE(pfn != spte_to_pfn(*sptep))) {
|
||||
} else if (pfn != spte_to_pfn(*sptep)) {
|
||||
WARN_ON_ONCE(vcpu->arch.mmu->root_role.direct);
|
||||
drop_spte(vcpu->kvm, sptep);
|
||||
flush = true;
|
||||
} else
|
||||
was_rmapped = 1;
|
||||
}
|
||||
|
||||
if (unlikely(is_noslot_pfn(pfn))) {
|
||||
vcpu->stat.pf_mmio_spte_created++;
|
||||
mark_mmio_spte(vcpu, sptep, gfn, pte_access);
|
||||
if (flush)
|
||||
kvm_flush_remote_tlbs_gfn(vcpu->kvm, gfn, level);
|
||||
return RET_PF_EMULATE;
|
||||
}
|
||||
|
||||
wrprot = make_spte(vcpu, sp, slot, pte_access, gfn, pfn, *sptep, prefetch,
|
||||
false, host_writable, &spte);
|
||||
|
||||
|
|
|
|||
|
|
@ -4,6 +4,8 @@ KCOV_INSTRUMENT_tlb.o := n
|
|||
KCOV_INSTRUMENT_mem_encrypt.o := n
|
||||
KCOV_INSTRUMENT_mem_encrypt_amd.o := n
|
||||
KCOV_INSTRUMENT_pgprot.o := n
|
||||
# See the "Disable KCOV" comment in arch/x86/kernel/Makefile.
|
||||
KCOV_INSTRUMENT_physaddr.o := n
|
||||
|
||||
KASAN_SANITIZE_mem_encrypt.o := n
|
||||
KASAN_SANITIZE_mem_encrypt_amd.o := n
|
||||
|
|
|
|||
|
|
@ -424,7 +424,7 @@ void __init efi_unmap_boot_services(void)
|
|||
if (efi_enabled(EFI_DBG))
|
||||
return;
|
||||
|
||||
sz = sizeof(*ranges_to_free) * efi.memmap.nr_map + 1;
|
||||
sz = sizeof(*ranges_to_free) * (efi.memmap.nr_map + 1);
|
||||
ranges_to_free = kzalloc(sz, GFP_KERNEL);
|
||||
if (!ranges_to_free) {
|
||||
pr_err("Failed to allocate storage for freeable EFI regions\n");
|
||||
|
|
|
|||
|
|
@ -28,8 +28,10 @@ static const struct software_node geode_gpio_keys_node = {
.properties = geode_gpio_keys_props,
};

static struct property_entry geode_restart_key_props[] = {
{ /* Placeholder for GPIO property */ },
static struct software_node_ref_args geode_restart_gpio_ref;

static const struct property_entry geode_restart_key_props[] = {
PROPERTY_ENTRY_REF_ARRAY_LEN("gpios", &geode_restart_gpio_ref, 1),
PROPERTY_ENTRY_U32("linux,code", KEY_RESTART),
PROPERTY_ENTRY_STRING("label", "Reset button"),
PROPERTY_ENTRY_U32("debounce-interval", 100),

@ -64,8 +66,7 @@ int __init geode_create_restart_key(unsigned int pin)
struct platform_device *pd;
int err;

geode_restart_key_props[0] = PROPERTY_ENTRY_GPIO("gpios",
&geode_gpiochip_node,
geode_restart_gpio_ref = SOFTWARE_NODE_REFERENCE(&geode_gpiochip_node,
pin, GPIO_ACTIVE_LOW);

err = software_node_register_node_group(geode_gpio_keys_swnodes);

@ -99,6 +100,7 @@ int __init geode_create_leds(const char *label, const struct geode_led *leds,
const struct software_node *group[MAX_LEDS + 2] = { 0 };
struct software_node *swnodes;
struct property_entry *props;
struct software_node_ref_args *gpio_refs;
struct platform_device_info led_info = {
.name = "leds-gpio",
.id = PLATFORM_DEVID_NONE,

@ -127,6 +129,12 @@ int __init geode_create_leds(const char *label, const struct geode_led *leds,
goto err_free_swnodes;
}

gpio_refs = kzalloc_objs(*gpio_refs, n_leds);
if (!gpio_refs) {
err = -ENOMEM;
goto err_free_props;
}

group[0] = &geode_gpio_leds_node;
for (i = 0; i < n_leds; i++) {
node_name = kasprintf(GFP_KERNEL, "%s:%d", label, i);

@ -135,9 +143,11 @@ int __init geode_create_leds(const char *label, const struct geode_led *leds,
goto err_free_names;
}

gpio_refs[i] = SOFTWARE_NODE_REFERENCE(&geode_gpiochip_node,
leds[i].pin,
GPIO_ACTIVE_LOW);
props[i * 3 + 0] =
PROPERTY_ENTRY_GPIO("gpios", &geode_gpiochip_node,
leds[i].pin, GPIO_ACTIVE_LOW);
PROPERTY_ENTRY_REF_ARRAY_LEN("gpios", &gpio_refs[i], 1);
props[i * 3 + 1] =
PROPERTY_ENTRY_STRING("linux,default-trigger",
leds[i].default_on ?

@ -171,6 +181,8 @@ int __init geode_create_leds(const char *label, const struct geode_led *leds,
err_free_names:
while (--i >= 0)
kfree(swnodes[i].name);
kfree(gpio_refs);
err_free_props:
kfree(props);
err_free_swnodes:
kfree(swnodes);
@ -623,8 +623,10 @@ static int af_alg_alloc_tsgl(struct sock *sk)
sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
sgl->cur = 0;

if (sg)
if (sg) {
sg_unmark_end(sg + MAX_SGL_ENTS - 1);
sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
}

list_add_tail(&sgl->list, &ctx->tsgl_list);
}

@ -635,15 +637,13 @@ static int af_alg_alloc_tsgl(struct sock *sk)
/**
* af_alg_count_tsgl - Count number of TX SG entries
*
* The counting starts from the beginning of the SGL to @bytes. If
* an @offset is provided, the counting of the SG entries starts at the @offset.
* The counting starts from the beginning of the SGL to @bytes.
*
* @sk: socket of connection to user space
* @bytes: Count the number of SG entries holding given number of bytes.
* @offset: Start the counting of SG entries from the given offset.
* Return: Number of TX SG entries found given the constraints
*/
unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset)
unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes)
{
const struct alg_sock *ask = alg_sk(sk);
const struct af_alg_ctx *ctx = ask->private;

@ -658,25 +658,11 @@ unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset)
const struct scatterlist *sg = sgl->sg;

for (i = 0; i < sgl->cur; i++) {
size_t bytes_count;

/* Skip offset */
if (offset >= sg[i].length) {
offset -= sg[i].length;
bytes -= sg[i].length;
continue;
}

bytes_count = sg[i].length - offset;

offset = 0;
sgl_count++;

/* If we have seen requested number of bytes, stop */
if (bytes_count >= bytes)
if (sg[i].length >= bytes)
return sgl_count;

bytes -= bytes_count;
bytes -= sg[i].length;
}
}

@ -688,19 +674,14 @@ EXPORT_SYMBOL_GPL(af_alg_count_tsgl);
* af_alg_pull_tsgl - Release the specified buffers from TX SGL
*
* If @dst is non-null, reassign the pages to @dst. The caller must release
* the pages. If @dst_offset is given only reassign the pages to @dst starting
* at the @dst_offset (byte). The caller must ensure that @dst is large
* enough (e.g. by using af_alg_count_tsgl with the same offset).
* the pages.
*
* @sk: socket of connection to user space
* @used: Number of bytes to pull from TX SGL
* @dst: If non-NULL, buffer is reassigned to dst SGL instead of releasing. The
* caller must release the buffers in dst.
* @dst_offset: Reassign the TX SGL from given offset. All buffers before
* reaching the offset is released.
*/
void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
size_t dst_offset)
void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst)
{
struct alg_sock *ask = alg_sk(sk);
struct af_alg_ctx *ctx = ask->private;

@ -725,18 +706,10 @@ void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
* SG entries in dst.
*/
if (dst) {
if (dst_offset >= plen) {
/* discard page before offset */
dst_offset -= plen;
} else {
/* reassign page to dst after offset */
get_page(page);
sg_set_page(dst + j, page,
plen - dst_offset,
sg[i].offset + dst_offset);
dst_offset = 0;
j++;
}
/* reassign page to dst after offset */
get_page(page);
sg_set_page(dst + j, page, plen, sg[i].offset);
j++;
}

sg[i].length -= plen;
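The simplified af_alg_count_tsgl() above walks the TX SGL and counts entries until the requested byte count is covered, with the old @offset handling dropped. A rough user-space analogue of that counting loop (illustrative only; `struct seg` and `count_segs` are made-up names, not kernel API):

#include <stddef.h>
#include <stdio.h>

struct seg { size_t length; };

/* Count how many segments are needed to cover `bytes`. */
static size_t count_segs(const struct seg *sg, size_t n, size_t bytes)
{
	size_t i, count = 0;

	for (i = 0; i < n; i++) {
		count++;
		if (sg[i].length >= bytes)
			return count;
		bytes -= sg[i].length;
	}
	return count;
}

int main(void)
{
	struct seg sg[] = { { 100 }, { 200 }, { 300 } };

	printf("%zu\n", count_segs(sg, 3, 250)); /* prints 2 */
	return 0;
}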
@ -26,7 +26,6 @@
#include <crypto/internal/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/if_alg.h>
#include <crypto/skcipher.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>

@ -72,9 +71,8 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
struct alg_sock *pask = alg_sk(psk);
struct af_alg_ctx *ctx = ask->private;
struct crypto_aead *tfm = pask->private;
unsigned int i, as = crypto_aead_authsize(tfm);
unsigned int as = crypto_aead_authsize(tfm);
struct af_alg_async_req *areq;
struct af_alg_tsgl *tsgl, *tmp;
struct scatterlist *rsgl_src, *tsgl_src = NULL;
int err = 0;
size_t used = 0; /* [in] TX bufs to be en/decrypted */

@ -154,23 +152,24 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
outlen -= less;
}

/*
* Create a per request TX SGL for this request which tracks the
* SG entries from the global TX SGL.
*/
processed = used + ctx->aead_assoclen;
list_for_each_entry_safe(tsgl, tmp, &ctx->tsgl_list, list) {
for (i = 0; i < tsgl->cur; i++) {
struct scatterlist *process_sg = tsgl->sg + i;

if (!(process_sg->length) || !sg_page(process_sg))
continue;
tsgl_src = process_sg;
break;
}
if (tsgl_src)
break;
}
if (processed && !tsgl_src) {
err = -EFAULT;
areq->tsgl_entries = af_alg_count_tsgl(sk, processed);
if (!areq->tsgl_entries)
areq->tsgl_entries = 1;
areq->tsgl = sock_kmalloc(sk, array_size(sizeof(*areq->tsgl),
areq->tsgl_entries),
GFP_KERNEL);
if (!areq->tsgl) {
err = -ENOMEM;
goto free;
}
sg_init_table(areq->tsgl, areq->tsgl_entries);
af_alg_pull_tsgl(sk, processed, areq->tsgl);
tsgl_src = areq->tsgl;

/*
* Copy of AAD from source to destination

@ -179,76 +178,15 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
* when user space uses an in-place cipher operation, the kernel
* will copy the data as it does not see whether such in-place operation
* is initiated.
*
* To ensure efficiency, the following implementation ensure that the
* ciphers are invoked to perform a crypto operation in-place. This
* is achieved by memory management specified as follows.
*/

/* Use the RX SGL as source (and destination) for crypto op. */
rsgl_src = areq->first_rsgl.sgl.sgt.sgl;

if (ctx->enc) {
/*
* Encryption operation - The in-place cipher operation is
* achieved by the following operation:
*
* TX SGL: AAD || PT
* | |
* | copy |
* v v
* RX SGL: AAD || PT || Tag
*/
memcpy_sglist(areq->first_rsgl.sgl.sgt.sgl, tsgl_src,
processed);
af_alg_pull_tsgl(sk, processed, NULL, 0);
} else {
/*
* Decryption operation - To achieve an in-place cipher
* operation, the following SGL structure is used:
*
* TX SGL: AAD || CT || Tag
* | | ^
* | copy | | Create SGL link.
* v v |
* RX SGL: AAD || CT ----+
*/

/* Copy AAD || CT to RX SGL buffer for in-place operation. */
memcpy_sglist(areq->first_rsgl.sgl.sgt.sgl, tsgl_src, outlen);

/* Create TX SGL for tag and chain it to RX SGL. */
areq->tsgl_entries = af_alg_count_tsgl(sk, processed,
processed - as);
if (!areq->tsgl_entries)
areq->tsgl_entries = 1;
areq->tsgl = sock_kmalloc(sk, array_size(sizeof(*areq->tsgl),
areq->tsgl_entries),
GFP_KERNEL);
if (!areq->tsgl) {
err = -ENOMEM;
goto free;
}
sg_init_table(areq->tsgl, areq->tsgl_entries);

/* Release TX SGL, except for tag data and reassign tag data. */
af_alg_pull_tsgl(sk, processed, areq->tsgl, processed - as);

/* chain the areq TX SGL holding the tag with RX SGL */
if (usedpages) {
/* RX SGL present */
struct af_alg_sgl *sgl_prev = &areq->last_rsgl->sgl;
struct scatterlist *sg = sgl_prev->sgt.sgl;

sg_unmark_end(sg + sgl_prev->sgt.nents - 1);
sg_chain(sg, sgl_prev->sgt.nents + 1, areq->tsgl);
} else
/* no RX SGL present (e.g. authentication only) */
rsgl_src = areq->tsgl;
}
memcpy_sglist(rsgl_src, tsgl_src, ctx->aead_assoclen);

/* Initialize the crypto operation */
aead_request_set_crypt(&areq->cra_u.aead_req, rsgl_src,
aead_request_set_crypt(&areq->cra_u.aead_req, tsgl_src,
areq->first_rsgl.sgl.sgt.sgl, used, ctx->iv);
aead_request_set_ad(&areq->cra_u.aead_req, ctx->aead_assoclen);
aead_request_set_tfm(&areq->cra_u.aead_req, tfm);

@ -450,7 +388,7 @@ static void aead_sock_destruct(struct sock *sk)
struct crypto_aead *tfm = pask->private;
unsigned int ivlen = crypto_aead_ivsize(tfm);

af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
af_alg_pull_tsgl(sk, ctx->used, NULL);
sock_kzfree_s(sk, ctx->iv, ivlen);
sock_kfree_s(sk, ctx, ctx->len);
af_alg_release_parent(sk);
@ -138,7 +138,7 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
* Create a per request TX SGL for this request which tracks the
* SG entries from the global TX SGL.
*/
areq->tsgl_entries = af_alg_count_tsgl(sk, len, 0);
areq->tsgl_entries = af_alg_count_tsgl(sk, len);
if (!areq->tsgl_entries)
areq->tsgl_entries = 1;
areq->tsgl = sock_kmalloc(sk, array_size(sizeof(*areq->tsgl),

@ -149,7 +149,7 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
goto free;
}
sg_init_table(areq->tsgl, areq->tsgl_entries);
af_alg_pull_tsgl(sk, len, areq->tsgl, 0);
af_alg_pull_tsgl(sk, len, areq->tsgl);

/* Initialize the crypto operation */
skcipher_request_set_tfm(&areq->cra_u.skcipher_req, tfm);

@ -363,7 +363,7 @@ static void skcipher_sock_destruct(struct sock *sk)
struct alg_sock *pask = alg_sk(psk);
struct crypto_skcipher *tfm = pask->private;

af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
af_alg_pull_tsgl(sk, ctx->used, NULL);
sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm));
if (ctx->state)
sock_kzfree_s(sk, ctx->state, crypto_skcipher_statesize(tfm));
@ -207,6 +207,7 @@ static int crypto_authenc_esn_decrypt_tail(struct aead_request *req,
u8 *ohash = areq_ctx->tail;
unsigned int cryptlen = req->cryptlen - authsize;
unsigned int assoclen = req->assoclen;
struct scatterlist *src = req->src;
struct scatterlist *dst = req->dst;
u8 *ihash = ohash + crypto_ahash_digestsize(auth);
u32 tmp[2];

@ -214,23 +215,27 @@
if (!authsize)
goto decrypt;

/* Move high-order bits of sequence number back. */
scatterwalk_map_and_copy(tmp, dst, 4, 4, 0);
scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 0);
scatterwalk_map_and_copy(tmp, dst, 0, 8, 1);
if (src == dst) {
/* Move high-order bits of sequence number back. */
scatterwalk_map_and_copy(tmp, dst, 4, 4, 0);
scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 0);
scatterwalk_map_and_copy(tmp, dst, 0, 8, 1);
} else
memcpy_sglist(dst, src, assoclen);

if (crypto_memneq(ihash, ohash, authsize))
return -EBADMSG;

decrypt:

sg_init_table(areq_ctx->dst, 2);
if (src != dst)
src = scatterwalk_ffwd(areq_ctx->src, src, assoclen);
dst = scatterwalk_ffwd(areq_ctx->dst, dst, assoclen);

skcipher_request_set_tfm(skreq, ctx->enc);
skcipher_request_set_callback(skreq, flags,
req->base.complete, req->base.data);
skcipher_request_set_crypt(skreq, dst, dst, cryptlen, req->iv);
skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);

return crypto_skcipher_decrypt(skreq);
}

@ -255,6 +260,7 @@ static int crypto_authenc_esn_decrypt(struct aead_request *req)
unsigned int assoclen = req->assoclen;
unsigned int cryptlen = req->cryptlen;
u8 *ihash = ohash + crypto_ahash_digestsize(auth);
struct scatterlist *src = req->src;
struct scatterlist *dst = req->dst;
u32 tmp[2];
int err;

@ -262,24 +268,28 @@ static int crypto_authenc_esn_decrypt(struct aead_request *req)
if (assoclen < 8)
return -EINVAL;

cryptlen -= authsize;

if (req->src != dst)
memcpy_sglist(dst, req->src, assoclen + cryptlen);

scatterwalk_map_and_copy(ihash, req->src, assoclen + cryptlen,
authsize, 0);

if (!authsize)
goto tail;

/* Move high-order bits of sequence number to the end. */
scatterwalk_map_and_copy(tmp, dst, 0, 8, 0);
scatterwalk_map_and_copy(tmp, dst, 4, 4, 1);
scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 1);
cryptlen -= authsize;
scatterwalk_map_and_copy(ihash, req->src, assoclen + cryptlen,
authsize, 0);

sg_init_table(areq_ctx->dst, 2);
dst = scatterwalk_ffwd(areq_ctx->dst, dst, 4);
/* Move high-order bits of sequence number to the end. */
scatterwalk_map_and_copy(tmp, src, 0, 8, 0);
if (src == dst) {
scatterwalk_map_and_copy(tmp, dst, 4, 4, 1);
scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 1);
dst = scatterwalk_ffwd(areq_ctx->dst, dst, 4);
} else {
scatterwalk_map_and_copy(tmp, dst, 0, 4, 1);
scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen - 4, 4, 1);

src = scatterwalk_ffwd(areq_ctx->src, src, 8);
dst = scatterwalk_ffwd(areq_ctx->dst, dst, 4);
memcpy_sglist(dst, src, assoclen + cryptlen - 8);
dst = req->dst;
}

ahash_request_set_tfm(ahreq, auth);
ahash_request_set_crypt(ahreq, dst, ohash, assoclen + cryptlen);
@ -164,18 +164,21 @@ static int deflate_decompress_one(struct acomp_req *req,

do {
unsigned int dcur;
unsigned long avail_in;

dcur = acomp_walk_next_dst(&walk);
if (!dcur) {
out_of_space = true;
break;
}

stream->avail_out = dcur;
stream->next_out = walk.dst.virt.addr;
avail_in = stream->avail_in;

ret = zlib_inflate(stream, Z_NO_FLUSH);

if (!dcur && avail_in == stream->avail_in) {
out_of_space = true;
break;
}

dcur -= stream->avail_out;
acomp_walk_done_dst(&walk, dcur);
} while (ret == Z_OK && stream->avail_in);
@ -35,6 +35,7 @@
#define IVPU_HW_IP_60XX 60

#define IVPU_HW_IP_REV_LNL_B0 4
#define IVPU_HW_IP_REV_NVL_A0 0

#define IVPU_HW_BTRS_MTL 1
#define IVPU_HW_BTRS_LNL 2
@ -70,8 +70,10 @@ static void wa_init(struct ivpu_device *vdev)
if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
vdev->wa.interrupt_clear_with_0 = ivpu_hw_btrs_irqs_clear_with_0_mtl(vdev);

if (ivpu_device_id(vdev) == PCI_DEVICE_ID_LNL &&
ivpu_revision(vdev) < IVPU_HW_IP_REV_LNL_B0)
if ((ivpu_device_id(vdev) == PCI_DEVICE_ID_LNL &&
ivpu_revision(vdev) < IVPU_HW_IP_REV_LNL_B0) ||
(ivpu_device_id(vdev) == PCI_DEVICE_ID_NVL &&
ivpu_revision(vdev) == IVPU_HW_IP_REV_NVL_A0))
vdev->wa.disable_clock_relinquish = true;

if (ivpu_test_mode & IVPU_TEST_MODE_CLK_RELINQ_ENABLE)
@ -914,7 +914,7 @@ static int decode_deactivate(struct qaic_device *qdev, void *trans, u32 *msg_len
*/
return -ENODEV;

if (status) {
if (usr && status) {
/*
* Releasing resources failed on the device side, which puts
* us in a bind since they may still be in use, so enable the

@ -1109,6 +1109,9 @@ static void *msg_xfer(struct qaic_device *qdev, struct wrapper_list *wrappers, u
mutex_lock(&qdev->cntl_mutex);
if (!list_empty(&elem.list))
list_del(&elem.list);
/* resp_worker() processed the response but the wait was interrupted */
else if (ret == -ERESTARTSYS)
ret = 0;
if (!ret && !elem.buf)
ret = -ETIMEDOUT;
else if (ret > 0 && !elem.buf)

@ -1419,9 +1422,49 @@ static void resp_worker(struct work_struct *work)
}
mutex_unlock(&qdev->cntl_mutex);

if (!found)
if (!found) {
/*
* The user might have gone away at this point without waiting
* for QAIC_TRANS_DEACTIVATE_FROM_DEV transaction coming from
* the device. If this is not handled correctly, the host will
* not know that the DBC[n] has been freed on the device.
* Due to this failure in synchronization between the device and
* the host, if another user requests to activate a network, and
* the device assigns DBC[n] again, save_dbc_buf() will hang,
* waiting for dbc[n]->in_use to be set to false, which will not
* happen unless the qaic_dev_reset_clean_local_state() gets
* called by resetting the device (or re-inserting the module).
*
* As a solution, we look for QAIC_TRANS_DEACTIVATE_FROM_DEV
* transactions in the message before disposing of it, then
* handle releasing the DBC resources.
*
* Since the user has gone away, if the device could not
* deactivate the network (status != 0), there is no way to
* enable and reassign the DBC to the user. We can put trust in
* the device that it will release all the active DBCs in
* response to the QAIC_TRANS_TERMINATE_TO_DEV transaction,
* otherwise, the user can issue an soc_reset to the device.
*/
u32 msg_count = le32_to_cpu(msg->hdr.count);
u32 msg_len = le32_to_cpu(msg->hdr.len);
u32 len = 0;
int j;

for (j = 0; j < msg_count && len < msg_len; ++j) {
struct wire_trans_hdr *trans_hdr;

trans_hdr = (struct wire_trans_hdr *)(msg->data + len);
if (le32_to_cpu(trans_hdr->type) == QAIC_TRANS_DEACTIVATE_FROM_DEV) {
if (decode_deactivate(qdev, trans_hdr, &len, NULL))
len += le32_to_cpu(trans_hdr->len);
} else {
len += le32_to_cpu(trans_hdr->len);
}
}
/* request must have timed out, drop packet */
kfree(msg);
}

kfree(resp);
}
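The recovery path above walks the wire message record by record, using each header's length field to advance, and handles any QAIC_TRANS_DEACTIVATE_FROM_DEV entries it finds before dropping the message. A stand-alone sketch of that kind of length-prefixed walk (the record layout here is hypothetical, not the driver's wire format):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct rec_hdr {
	uint32_t type;
	uint32_t len;	/* total record length, header included */
};

/* Walk a buffer of length-prefixed records, stopping at the declared end. */
static void walk_records(const uint8_t *buf, uint32_t total_len, uint32_t count)
{
	uint32_t off = 0, i;

	for (i = 0; i < count && off + sizeof(struct rec_hdr) <= total_len; i++) {
		struct rec_hdr hdr;

		memcpy(&hdr, buf + off, sizeof(hdr));
		if (hdr.len < sizeof(hdr) || off + hdr.len > total_len)
			break;	/* malformed record, stop walking */
		printf("record %u: type=%u len=%u\n", i, hdr.type, hdr.len);
		off += hdr.len;
	}
}

int main(void)
{
	uint8_t buf[32] = { 0 };
	struct rec_hdr a = { 1, 12 }, b = { 7, 8 };

	memcpy(buf, &a, sizeof(a));
	memcpy(buf + 12, &b, sizeof(b));
	walk_records(buf, 20, 2);
	return 0;
}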
@ -1656,6 +1656,8 @@ static int acpi_ec_setup(struct acpi_ec *ec, struct acpi_device *device, bool ca

ret = ec_install_handlers(ec, device, call_reg);
if (ret) {
ec_remove_handlers(ec);

if (ec == first_ec)
first_ec = NULL;
@ -263,6 +263,13 @@ static int rimt_iommu_xlate(struct device *dev, struct acpi_rimt_node *node, u32
if (!rimt_fwnode)
return -EPROBE_DEFER;

/*
* EPROBE_DEFER ensures IOMMU is probed before the devices that
* depend on them. During shutdown, however, the IOMMU may be removed
* first, leading to issues. To avoid this, a device link is added
* which enforces the correct removal order.
*/
device_link_add(dev, rimt_fwnode->dev, DL_FLAG_AUTOREMOVE_CONSUMER);
return acpi_iommu_fwspec_init(dev, deviceid, rimt_fwnode);
}
@ -13,6 +13,8 @@
//
// The shrinker will use trylock methods because it locks them in a different order.

use crate::AssertSync;

use core::{
marker::PhantomPinned,
mem::{size_of, size_of_val, MaybeUninit},

@ -143,14 +145,14 @@ pub(crate) struct ShrinkablePageRange {
}

// We do not define any ops. For now, used only to check identity of vmas.
static BINDER_VM_OPS: bindings::vm_operations_struct = pin_init::zeroed();
static BINDER_VM_OPS: AssertSync<bindings::vm_operations_struct> = AssertSync(pin_init::zeroed());

// To ensure that we do not accidentally install pages into or zap pages from the wrong vma, we
// check its vm_ops and private data before using it.
fn check_vma(vma: &virt::VmaRef, owner: *const ShrinkablePageRange) -> Option<&virt::VmaMixedMap> {
// SAFETY: Just reading the vm_ops pointer of any active vma is safe.
let vm_ops = unsafe { (*vma.as_ptr()).vm_ops };
if !ptr::eq(vm_ops, &BINDER_VM_OPS) {
if !ptr::eq(vm_ops, &BINDER_VM_OPS.0) {
return None;
}

@ -342,7 +344,7 @@ pub(crate) fn register_with_vma(&self, vma: &virt::VmaNew) -> Result<usize> {

// SAFETY: We own the vma, and we don't use any methods on VmaNew that rely on
// `vm_ops`.
unsafe { (*vma.as_ptr()).vm_ops = &BINDER_VM_OPS };
unsafe { (*vma.as_ptr()).vm_ops = &BINDER_VM_OPS.0 };

Ok(num_pages)
}
@ -306,7 +306,7 @@ fn init(_module: &'static kernel::ThisModule) -> Result<Self> {
/// Makes the inner type Sync.
#[repr(transparent)]
pub struct AssertSync<T>(T);
// SAFETY: Used only to insert `file_operations` into a global, which is safe.
// SAFETY: Used only to insert C bindings types into globals, which is safe.
unsafe impl<T> Sync for AssertSync<T> {}

/// File operations that rust_binderfs.c can use.
@ -99,8 +99,13 @@ static int lcd2s_print(struct charlcd *lcd, int c)
{
struct lcd2s_data *lcd2s = lcd->drvdata;
u8 buf[2] = { LCD2S_CMD_WRITE, c };
int ret;

lcd2s_i2c_master_send(lcd2s->i2c, buf, sizeof(buf));
ret = lcd2s_i2c_master_send(lcd2s->i2c, buf, sizeof(buf));
if (ret < 0)
return ret;
if (ret != sizeof(buf))
return -EIO;
return 0;
}

@ -108,9 +113,13 @@ static int lcd2s_gotoxy(struct charlcd *lcd, unsigned int x, unsigned int y)
{
struct lcd2s_data *lcd2s = lcd->drvdata;
u8 buf[3] = { LCD2S_CMD_CUR_POS, y + 1, x + 1 };
int ret;

lcd2s_i2c_master_send(lcd2s->i2c, buf, sizeof(buf));

ret = lcd2s_i2c_master_send(lcd2s->i2c, buf, sizeof(buf));
if (ret < 0)
return ret;
if (ret != sizeof(buf))
return -EIO;
return 0;
}
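The lcd2s change above follows a common convention for normalizing transfer results: a negative return is propagated as an error, and any other return shorter than the requested length is reported as -EIO. A tiny user-space illustration of the same pattern (the `fake_send()` transport is hypothetical):

#include <errno.h>
#include <stdio.h>

/* Pretend transport: returns bytes written or a negative errno. */
static int fake_send(const void *buf, int len)
{
	(void)buf;
	return len - 1;	/* simulate a short write */
}

static int send_all_or_error(const void *buf, int len)
{
	int ret = fake_send(buf, len);

	if (ret < 0)
		return ret;	/* transport error, propagate */
	if (ret != len)
		return -EIO;	/* partial transfer treated as I/O error */
	return 0;
}

int main(void)
{
	char cmd[2] = { 0x1b, 'A' };

	printf("%d\n", send_all_or_error(cmd, sizeof(cmd))); /* prints -EIO (negative) */
	return 0;
}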
@ -365,7 +365,7 @@ static DEFINE_IDA(linedisp_id);

static void linedisp_release(struct device *dev)
{
struct linedisp *linedisp = to_linedisp(dev);
struct linedisp *linedisp = container_of(dev, struct linedisp, dev);

kfree(linedisp->map);
kfree(linedisp->message);
@ -1545,6 +1545,7 @@ static int _regmap_select_page(struct regmap *map, unsigned int *reg,
unsigned int val_num)
{
void *orig_work_buf;
unsigned int selector_reg;
unsigned int win_offset;
unsigned int win_page;
bool page_chg;

@ -1563,10 +1564,31 @@ static int _regmap_select_page(struct regmap *map, unsigned int *reg,
return -EINVAL;
}

/* It is possible to have selector register inside data window.
In that case, selector register is located on every page and
it needs no page switching, when accessed alone. */
/*
* Calculate the address of the selector register in the corresponding
* data window if it is located on every page.
*/
page_chg = in_range(range->selector_reg, range->window_start, range->window_len);
if (page_chg)
selector_reg = range->range_min + win_page * range->window_len +
range->selector_reg - range->window_start;

/*
* It is possible to have selector register inside data window.
* In that case, selector register is located on every page and it
* needs no page switching, when accessed alone.
*
* Nevertheless we should synchronize the cache values for it.
* This can't be properly achieved if the selector register is
* the first and the only one to be read inside the data window.
* That's why we update it in that case as well.
*
* However, we specifically avoid updating it for the default page,
* when it's overlapped with the real data window, to prevent from
* infinite looping.
*/
if (val_num > 1 ||
(page_chg && selector_reg != range->selector_reg) ||
range->window_start + win_offset != range->selector_reg) {
/* Use separate work_buf during page switching */
orig_work_buf = map->work_buf;

@ -1575,7 +1597,7 @@ static int _regmap_select_page(struct regmap *map, unsigned int *reg,
ret = _regmap_update_bits(map, range->selector_reg,
range->selector_mask,
win_page << range->selector_shift,
&page_chg, false);
NULL, false);

map->work_buf = orig_work_buf;
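The new page_chg/selector_reg logic above computes where the selector register appears inside the currently selected data window. A stand-alone sketch of that address arithmetic (field names mirror the regmap range, but the struct and values here are illustrative):

#include <stdbool.h>
#include <stdio.h>

struct range {
	unsigned int range_min;		/* first virtual register of the range */
	unsigned int selector_reg;	/* physical selector register */
	unsigned int window_start;	/* first physical register of the window */
	unsigned int window_len;	/* window size in registers */
};

static bool in_window(unsigned int reg, const struct range *r)
{
	return reg >= r->window_start && reg < r->window_start + r->window_len;
}

int main(void)
{
	struct range r = { .range_min = 0x100, .selector_reg = 0x0f,
			   .window_start = 0x00, .window_len = 0x10 };
	unsigned int win_page = 2;

	if (in_window(r.selector_reg, &r)) {
		/* Address of the selector as seen through page win_page. */
		unsigned int sel = r.range_min + win_page * r.window_len +
				   r.selector_reg - r.window_start;
		printf("selector visible at 0x%x\n", sel);	/* 0x12f */
	}
	return 0;
}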
@ -917,9 +917,8 @@ static void zram_account_writeback_submit(struct zram *zram)

static int zram_writeback_complete(struct zram *zram, struct zram_wb_req *req)
{
u32 size, index = req->pps->index;
int err, prio;
bool huge;
u32 index = req->pps->index;
int err;

err = blk_status_to_errno(req->bio.bi_status);
if (err) {

@ -946,28 +945,13 @@ static int zram_writeback_complete(struct zram *zram, struct zram_wb_req *req)
goto out;
}

if (zram->compressed_wb) {
/*
* ZRAM_WB slots get freed, we need to preserve data required
* for read decompression.
*/
size = get_slot_size(zram, index);
prio = get_slot_comp_priority(zram, index);
huge = test_slot_flag(zram, index, ZRAM_HUGE);
}

slot_free(zram, index);
set_slot_flag(zram, index, ZRAM_WB);
clear_slot_flag(zram, index, ZRAM_IDLE);
if (test_slot_flag(zram, index, ZRAM_HUGE))
atomic64_dec(&zram->stats.huge_pages);
atomic64_sub(get_slot_size(zram, index), &zram->stats.compr_data_size);
zs_free(zram->mem_pool, get_slot_handle(zram, index));
set_slot_handle(zram, index, req->blk_idx);

if (zram->compressed_wb) {
if (huge)
set_slot_flag(zram, index, ZRAM_HUGE);
set_slot_size(zram, index, size);
set_slot_comp_priority(zram, index, prio);
}

atomic64_inc(&zram->stats.pages_stored);
set_slot_flag(zram, index, ZRAM_WB);

out:
slot_unlock(zram, index);

@ -2010,8 +1994,13 @@ static void slot_free(struct zram *zram, u32 index)
set_slot_comp_priority(zram, index, 0);

if (test_slot_flag(zram, index, ZRAM_HUGE)) {
/*
* Writeback completion decrements ->huge_pages but keeps
* ZRAM_HUGE flag for deferred decompression path.
*/
if (!test_slot_flag(zram, index, ZRAM_WB))
atomic64_dec(&zram->stats.huge_pages);
clear_slot_flag(zram, index, ZRAM_HUGE);
atomic64_dec(&zram->stats.huge_pages);
}

if (test_slot_flag(zram, index, ZRAM_WB)) {
@ -251,11 +251,13 @@ void btintel_hw_error(struct hci_dev *hdev, u8 code)

bt_dev_err(hdev, "Hardware error 0x%2.2x", code);

hci_req_sync_lock(hdev);

skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
bt_dev_err(hdev, "Reset after hardware error failed (%ld)",
PTR_ERR(skb));
return;
goto unlock;
}
kfree_skb(skb);

@ -263,18 +265,21 @@ void btintel_hw_error(struct hci_dev *hdev, u8 code)
if (IS_ERR(skb)) {
bt_dev_err(hdev, "Retrieving Intel exception info failed (%ld)",
PTR_ERR(skb));
return;
goto unlock;
}

if (skb->len != 13) {
bt_dev_err(hdev, "Exception info size mismatch");
kfree_skb(skb);
return;
goto unlock;
}

bt_dev_err(hdev, "Exception info %s", (char *)(skb->data + 1));

kfree_skb(skb);

unlock:
hci_req_sync_unlock(hdev);
}
EXPORT_SYMBOL_GPL(btintel_hw_error);
@ -2376,8 +2376,11 @@ static void btusb_work(struct work_struct *work)
if (data->air_mode == HCI_NOTIFY_ENABLE_SCO_CVSD) {
if (hdev->voice_setting & 0x0020) {
static const int alts[3] = { 2, 4, 5 };
unsigned int sco_idx;

new_alts = alts[data->sco_num - 1];
sco_idx = min_t(unsigned int, data->sco_num - 1,
ARRAY_SIZE(alts) - 1);
new_alts = alts[sco_idx];
} else {
new_alts = data->sco_num;
}
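The btusb change above clamps the index derived from sco_num so it can never run past the alts[] table. The same guard in a minimal user-space form (a sketch, not the driver code; the extra zero check is an added assumption):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static unsigned int pick_alt(unsigned int sco_num)
{
	static const unsigned int alts[3] = { 2, 4, 5 };
	unsigned int idx = sco_num ? sco_num - 1 : 0;

	/* Clamp to the last table entry instead of indexing out of bounds. */
	if (idx >= ARRAY_SIZE(alts))
		idx = ARRAY_SIZE(alts) - 1;
	return alts[idx];
}

int main(void)
{
	printf("%u %u\n", pick_alt(2), pick_alt(7)); /* prints "4 5" */
	return 0;
}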
@ -109,9 +109,6 @@ static int h4_recv(struct hci_uart *hu, const void *data, int count)
{
struct h4_struct *h4 = hu->priv;

if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;

h4->rx_skb = h4_recv_buf(hu, h4->rx_skb, data, count,
h4_recv_pkts, ARRAY_SIZE(h4_recv_pkts));
if (IS_ERR(h4->rx_skb)) {
@ -541,6 +541,8 @@ static int download_firmware(struct ll_device *lldev)
if (err || !fw->data || !fw->size) {
bt_dev_err(lldev->hu.hdev, "request_firmware failed(errno %d) for %s",
err, bts_scr_name);
if (!err)
release_firmware(fw);
return -EINVAL;
}
ptr = (void *)fw->data;
@ -793,13 +793,15 @@ static void do_become_nonbusy(struct comedi_device *dev,
__comedi_clear_subdevice_runflags(s, COMEDI_SRF_RUNNING |
COMEDI_SRF_BUSY);
spin_unlock_irqrestore(&s->spin_lock, flags);
if (comedi_is_runflags_busy(runflags)) {
if (async) {
/*
* "Run active" counter was set to 1 when setting up the
* command. Decrement it and wait for it to become 0.
*/
comedi_put_is_subdevice_running(s);
wait_for_completion(&async->run_complete);
if (comedi_is_runflags_busy(runflags)) {
comedi_put_is_subdevice_running(s);
wait_for_completion(&async->run_complete);
}
comedi_buf_reset(s);
async->inttrig = NULL;
kfree(async->cmd.chanlist);
@ -1063,6 +1063,14 @@ int comedi_device_attach(struct comedi_device *dev, struct comedi_devconfig *it)
ret = -EIO;
goto out;
}
if (IS_ENABLED(CONFIG_LOCKDEP)) {
/*
* dev->spinlock is for private use by the attached low-level
* driver. Reinitialize it to stop lock-dependency tracking
* between attachments to different low-level drivers.
*/
spin_lock_init(&dev->spinlock);
}
dev->driver = driv;
dev->board_name = dev->board_ptr ? *(const char **)dev->board_ptr
: dev->driver->driver_name;
@ -175,6 +175,18 @@ static int dt2815_attach(struct comedi_device *dev, struct comedi_devconfig *it)
? current_range_type : voltage_range_type;
}

/*
* Check if hardware is present before attempting any I/O operations.
* Reading 0xff from status register typically indicates no hardware
* on the bus (floating bus reads as all 1s).
*/
if (inb(dev->iobase + DT2815_STATUS) == 0xff) {
dev_err(dev->class_dev,
"No hardware detected at I/O base 0x%lx\n",
dev->iobase);
return -ENODEV;
}

/* Init the 2815 */
outb(0x00, dev->iobase + DT2815_STATUS);
for (i = 0; i < 100; i++) {
@ -315,6 +315,18 @@ static int me4000_xilinx_download(struct comedi_device *dev,
unsigned int val;
unsigned int i;

/* Get data stream length from header. */
if (size >= 4) {
file_length = (((unsigned int)data[0] & 0xff) << 24) +
(((unsigned int)data[1] & 0xff) << 16) +
(((unsigned int)data[2] & 0xff) << 8) +
((unsigned int)data[3] & 0xff);
}
if (size < 16 || file_length > size - 16) {
dev_err(dev->class_dev, "Firmware length inconsistency\n");
return -EINVAL;
}

if (!xilinx_iobase)
return -ENODEV;

@ -346,10 +358,6 @@ static int me4000_xilinx_download(struct comedi_device *dev,
outl(val, devpriv->plx_regbase + PLX9052_CNTRL);

/* Download Xilinx firmware */
file_length = (((unsigned int)data[0] & 0xff) << 24) +
(((unsigned int)data[1] & 0xff) << 16) +
(((unsigned int)data[2] & 0xff) << 8) +
((unsigned int)data[3] & 0xff);
usleep_range(10, 1000);

for (i = 0; i < file_length; i++) {
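The reordered me4000 code above reads a big-endian 32-bit length out of the firmware header and validates it against the blob size before touching the hardware. A user-space sketch of the same parse-and-bounds-check (hypothetical header layout, sized like the driver's 16-byte header):

#include <stdint.h>
#include <stdio.h>

/* Read a 32-bit big-endian value from the start of the firmware image. */
static uint32_t be32_at(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

static int check_fw(const uint8_t *data, size_t size)
{
	uint32_t file_length = 0;

	if (size >= 4)
		file_length = be32_at(data);
	/* The payload must fit after the 16-byte header. */
	if (size < 16 || file_length > size - 16)
		return -1;
	return 0;
}

int main(void)
{
	uint8_t fw[24] = { 0x00, 0x00, 0x00, 0x08 };	/* claims 8 payload bytes */

	printf("%d\n", check_fw(fw, sizeof(fw)));	/* 0: 8 <= 24 - 16 */
	return 0;
}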