summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorjc_gargma <jc_gargma@iserlohn-fortress.net>2023-01-23 17:27:55 -0800
committerjc_gargma <jc_gargma@iserlohn-fortress.net>2023-01-23 17:27:55 -0800
commit72a92e95495160e668598f355f17e7eaea820807 (patch)
treea81d3c382d1ac07c1af0409c2b067e8b4f2f7fd1
parentUpdated to 6.0.19 (diff)
downloadlinux-72a92e95495160e668598f355f17e7eaea820807.tar.xz
Updated to 6.1.7
-rw-r--r--0001-ZEN-Add-sysctl-and-CONFIG-to-disallow-unprivileged-C.patch38
-rw-r--r--0002-Revert-drm-display-dp_mst-Move-all-payload-info-into.patch2479
-rw-r--r--0003-btrfs-fix-invalid-leaf-access-due-to-inline-extent-d.patch67
-rw-r--r--0003-drm-sched-add-DRM_SCHED_FENCE_DONT_PIPELINE-flag.patch55
-rw-r--r--0004-drm-amdgpu-use-DRM_SCHED_FENCE_DONT_PIPELINE-for-VM-.patch41
-rw-r--r--0004-wifi-mac80211-fix-initialization-of-rx-link-and-rx-l.patch459
-rw-r--r--PKGBUILD44
-rw-r--r--config334
8 files changed, 3252 insertions, 265 deletions
diff --git a/0001-ZEN-Add-sysctl-and-CONFIG-to-disallow-unprivileged-C.patch b/0001-ZEN-Add-sysctl-and-CONFIG-to-disallow-unprivileged-C.patch
index 3ff5f1e..f9ba667 100644
--- a/0001-ZEN-Add-sysctl-and-CONFIG-to-disallow-unprivileged-C.patch
+++ b/0001-ZEN-Add-sysctl-and-CONFIG-to-disallow-unprivileged-C.patch
@@ -1,7 +1,7 @@
-From 400ac2ce023cd844f421a9781024fd89173388ac Mon Sep 17 00:00:00 2001
+From 9bec448ab7f53d02d88aaf0dd5d3683fb163a54c Mon Sep 17 00:00:00 2001
From: "Jan Alexander Steffens (heftig)" <jan.steffens@gmail.com>
Date: Mon, 16 Sep 2019 04:53:20 +0200
-Subject: [PATCH 1/6] ZEN: Add sysctl and CONFIG to disallow unprivileged
+Subject: [PATCH 1/5] ZEN: Add sysctl and CONFIG to disallow unprivileged
CLONE_NEWUSER
Our default behavior continues to match the vanilla kernel.
@@ -14,10 +14,10 @@ Our default behavior continues to match the vanilla kernel.
5 files changed, 53 insertions(+)
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
-index 33a4240e6a6f..82213f9c4c17 100644
+index 45f09bec02c4..87b20e2ee274 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
-@@ -139,6 +139,8 @@ static inline void set_rlimit_ucount_max(struct user_namespace *ns,
+@@ -148,6 +148,8 @@ static inline void set_userns_rlimit_max(struct user_namespace *ns,
#ifdef CONFIG_USER_NS
@@ -26,7 +26,7 @@ index 33a4240e6a6f..82213f9c4c17 100644
static inline struct user_namespace *get_user_ns(struct user_namespace *ns)
{
if (ns)
-@@ -172,6 +174,8 @@ extern bool current_in_userns(const struct user_namespace *target_ns);
+@@ -181,6 +183,8 @@ extern bool current_in_userns(const struct user_namespace *target_ns);
struct ns_common *ns_get_owner(struct ns_common *ns);
#else
@@ -36,10 +36,10 @@ index 33a4240e6a6f..82213f9c4c17 100644
{
return &init_user_ns;
diff --git a/init/Kconfig b/init/Kconfig
-index d1d779d6ba43..bd90c221090d 100644
+index 0c214af99085..d9ae969eae32 100644
--- a/init/Kconfig
+++ b/init/Kconfig
-@@ -1241,6 +1241,22 @@ config USER_NS
+@@ -1251,6 +1251,22 @@ config USER_NS
If unsure, say N.
@@ -63,12 +63,12 @@ index d1d779d6ba43..bd90c221090d 100644
bool "PID Namespaces"
default y
diff --git a/kernel/fork.c b/kernel/fork.c
-index 2b6bd511c6ed..704fe6bc9cb4 100644
+index 844dfdc8c639..31d41db3f84d 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
-@@ -99,6 +99,10 @@
+@@ -98,6 +98,10 @@
+ #include <linux/io_uring.h>
#include <linux/bpf.h>
- #include <linux/sched/mm.h>
+#ifdef CONFIG_USER_NS
+#include <linux/user_namespace.h>
@@ -77,7 +77,7 @@ index 2b6bd511c6ed..704fe6bc9cb4 100644
#include <asm/pgalloc.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
-@@ -2009,6 +2013,10 @@ static __latent_entropy struct task_struct *copy_process(
+@@ -2011,6 +2015,10 @@ static __latent_entropy struct task_struct *copy_process(
if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS))
return ERR_PTR(-EINVAL);
@@ -88,7 +88,7 @@ index 2b6bd511c6ed..704fe6bc9cb4 100644
/*
* Thread groups must share signals as well, and detached threads
* can only be started up within the thread group.
-@@ -3159,6 +3167,12 @@ int ksys_unshare(unsigned long unshare_flags)
+@@ -3171,6 +3179,12 @@ int ksys_unshare(unsigned long unshare_flags)
if (unshare_flags & CLONE_NEWNS)
unshare_flags |= CLONE_FS;
@@ -102,7 +102,7 @@ index 2b6bd511c6ed..704fe6bc9cb4 100644
if (err)
goto bad_unshare_out;
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
-index 205d605cacc5..d7247ec7ddda 100644
+index c6d9dec11b74..9a4514ad481b 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -81,6 +81,9 @@
@@ -113,9 +113,9 @@ index 205d605cacc5..d7247ec7ddda 100644
+#include <linux/user_namespace.h>
+#endif
- #if defined(CONFIG_SYSCTL)
-
-@@ -1649,6 +1652,15 @@ static struct ctl_table kern_table[] = {
+ /* shared constants to be used in various sysctls */
+ const int sysctl_vals[] = { 0, 1, 2, 3, 4, 100, 200, 1000, 3000, INT_MAX, 65535, -1 };
+@@ -1659,6 +1662,15 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
@@ -132,10 +132,10 @@ index 205d605cacc5..d7247ec7ddda 100644
{
.procname = "tainted",
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
-index 5481ba44a8d6..423ab2563ad7 100644
+index 54211dbd516c..16ca0c151629 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
-@@ -21,6 +21,13 @@
+@@ -22,6 +22,13 @@
#include <linux/bsearch.h>
#include <linux/sort.h>
@@ -150,5 +150,5 @@ index 5481ba44a8d6..423ab2563ad7 100644
static DEFINE_MUTEX(userns_state_mutex);
--
-2.38.1
+2.39.0
diff --git a/0002-Revert-drm-display-dp_mst-Move-all-payload-info-into.patch b/0002-Revert-drm-display-dp_mst-Move-all-payload-info-into.patch
new file mode 100644
index 0000000..3335f6a
--- /dev/null
+++ b/0002-Revert-drm-display-dp_mst-Move-all-payload-info-into.patch
@@ -0,0 +1,2479 @@
+From 96c2a11329533bb221e884bc8b91c6fc2f305dd8 Mon Sep 17 00:00:00 2001
+From: Wayne Lin <Wayne.Lin@amd.com>
+Date: Thu, 12 Jan 2023 16:50:44 +0800
+Subject: [PATCH 2/5] Revert "drm/display/dp_mst: Move all payload info into
+ the atomic state"
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This reverts commit 4d07b0bc403403438d9cf88450506240c5faf92f.
+
+[Why]
+Changes cause regression on amdgpu mst.
+E.g.
+In fill_dc_mst_payload_table_from_drm(), amdgpu expects to add/remove payload
+one by one and call fill_dc_mst_payload_table_from_drm() to update the HW
+maintained payload table. But previous change tries to go through all the
+payloads in mst_state and update amdpug hw maintained table in once everytime
+driver only tries to add/remove a specific payload stream only. The newly
+design idea conflicts with the implementation in amdgpu nowadays.
+
+[How]
+Revert this patch first. After addressing all regression problems caused by
+this previous patch, will add it back and adjust it.
+
+Signed-off-by: Wayne Lin <Wayne.Lin@amd.com>
+Link: https://gitlab.freedesktop.org/drm/amd/-/issues/2171
+Cc: stable@vger.kernel.org # 6.1
+Cc: Lyude Paul <lyude@redhat.com>
+Cc: Harry Wentland <harry.wentland@amd.com>
+Cc: Mario Limonciello <mario.limonciello@amd.com>
+Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Cc: Ben Skeggs <bskeggs@redhat.com>
+Cc: Stanislav Lisovskiy <stanislav.lisovskiy@intel.com>
+Cc: Fangzhi Zuo <Jerry.Zuo@amd.com>
+Cherry-picked-for: https://bugs.archlinux.org/task/76934
+---
+ .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 53 +-
+ .../amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 104 ++-
+ .../display/amdgpu_dm/amdgpu_dm_mst_types.c | 87 ++-
+ .../amd/display/include/link_service_types.h | 3 -
+ drivers/gpu/drm/display/drm_dp_mst_topology.c | 724 ++++++++++++------
+ drivers/gpu/drm/i915/display/intel_dp_mst.c | 64 +-
+ drivers/gpu/drm/i915/display/intel_hdcp.c | 24 +-
+ drivers/gpu/drm/nouveau/dispnv50/disp.c | 169 ++--
+ include/drm/display/drm_dp_mst_helper.h | 178 +++--
+ 9 files changed, 876 insertions(+), 530 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index dacad8b85963..40defd664b49 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -6460,7 +6460,6 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
+ const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
+ struct drm_dp_mst_topology_mgr *mst_mgr;
+ struct drm_dp_mst_port *mst_port;
+- struct drm_dp_mst_topology_state *mst_state;
+ enum dc_color_depth color_depth;
+ int clock, bpp = 0;
+ bool is_y420 = false;
+@@ -6474,13 +6473,6 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
+ if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
+ return 0;
+
+- mst_state = drm_atomic_get_mst_topology_state(state, mst_mgr);
+- if (IS_ERR(mst_state))
+- return PTR_ERR(mst_state);
+-
+- if (!mst_state->pbn_div)
+- mst_state->pbn_div = dm_mst_get_pbn_divider(aconnector->mst_port->dc_link);
+-
+ if (!state->duplicated) {
+ int max_bpc = conn_state->max_requested_bpc;
+ is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
+@@ -6492,10 +6484,11 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
+ clock = adjusted_mode->clock;
+ dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
+ }
+-
+- dm_new_connector_state->vcpi_slots =
+- drm_dp_atomic_find_time_slots(state, mst_mgr, mst_port,
+- dm_new_connector_state->pbn);
++ dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_time_slots(state,
++ mst_mgr,
++ mst_port,
++ dm_new_connector_state->pbn,
++ dm_mst_get_pbn_divider(aconnector->dc_link));
+ if (dm_new_connector_state->vcpi_slots < 0) {
+ DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
+ return dm_new_connector_state->vcpi_slots;
+@@ -6566,14 +6559,17 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
+ dm_conn_state->vcpi_slots = slot_num;
+
+ ret = drm_dp_mst_atomic_enable_dsc(state, aconnector->port,
+- dm_conn_state->pbn, false);
++ dm_conn_state->pbn, 0, false);
+ if (ret < 0)
+ return ret;
+
+ continue;
+ }
+
+- vcpi = drm_dp_mst_atomic_enable_dsc(state, aconnector->port, pbn, true);
++ vcpi = drm_dp_mst_atomic_enable_dsc(state,
++ aconnector->port,
++ pbn, pbn_div,
++ true);
+ if (vcpi < 0)
+ return vcpi;
+
+@@ -9407,6 +9403,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+ #if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct dsc_mst_fairness_vars vars[MAX_PIPES];
++ struct drm_dp_mst_topology_state *mst_state;
++ struct drm_dp_mst_topology_mgr *mgr;
+ #endif
+
+ trace_amdgpu_dm_atomic_check_begin(state);
+@@ -9654,6 +9652,33 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ lock_and_validation_needed = true;
+ }
+
++#if defined(CONFIG_DRM_AMD_DC_DCN)
++ /* set the slot info for each mst_state based on the link encoding format */
++ for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
++ struct amdgpu_dm_connector *aconnector;
++ struct drm_connector *connector;
++ struct drm_connector_list_iter iter;
++ u8 link_coding_cap;
++
++ if (!mgr->mst_state )
++ continue;
++
++ drm_connector_list_iter_begin(dev, &iter);
++ drm_for_each_connector_iter(connector, &iter) {
++ int id = connector->index;
++
++ if (id == mst_state->mgr->conn_base_id) {
++ aconnector = to_amdgpu_dm_connector(connector);
++ link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
++ drm_dp_mst_update_slots(mst_state, link_coding_cap);
++
++ break;
++ }
++ }
++ drm_connector_list_iter_end(&iter);
++
++ }
++#endif
+ /**
+ * Streams and planes are reset when there are changes that affect
+ * bandwidth. Anything that affects bandwidth needs to go through
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+index f72c013d3a5b..c8f9d10fde17 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+@@ -27,7 +27,6 @@
+ #include <linux/acpi.h>
+ #include <linux/i2c.h>
+
+-#include <drm/drm_atomic.h>
+ #include <drm/drm_probe_helper.h>
+ #include <drm/amdgpu_drm.h>
+ #include <drm/drm_edid.h>
+@@ -120,27 +119,40 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
+ }
+
+ static void
+-fill_dc_mst_payload_table_from_drm(struct drm_dp_mst_topology_state *mst_state,
+- struct amdgpu_dm_connector *aconnector,
+- struct dc_dp_mst_stream_allocation_table *table)
++fill_dc_mst_payload_table_from_drm(struct amdgpu_dm_connector *aconnector,
++ struct dc_dp_mst_stream_allocation_table *proposed_table)
+ {
+- struct dc_dp_mst_stream_allocation_table new_table = { 0 };
+- struct dc_dp_mst_stream_allocation *sa;
+- struct drm_dp_mst_atomic_payload *payload;
+-
+- /* Fill payload info*/
+- list_for_each_entry(payload, &mst_state->payloads, next) {
+- if (payload->delete)
+- continue;
+-
+- sa = &new_table.stream_allocations[new_table.stream_count];
+- sa->slot_count = payload->time_slots;
+- sa->vcp_id = payload->vcpi;
+- new_table.stream_count++;
++ int i;
++ struct drm_dp_mst_topology_mgr *mst_mgr =
++ &aconnector->mst_port->mst_mgr;
++
++ mutex_lock(&mst_mgr->payload_lock);
++
++ proposed_table->stream_count = 0;
++
++ /* number of active streams */
++ for (i = 0; i < mst_mgr->max_payloads; i++) {
++ if (mst_mgr->payloads[i].num_slots == 0)
++ break; /* end of vcp_id table */
++
++ ASSERT(mst_mgr->payloads[i].payload_state !=
++ DP_PAYLOAD_DELETE_LOCAL);
++
++ if (mst_mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL ||
++ mst_mgr->payloads[i].payload_state ==
++ DP_PAYLOAD_REMOTE) {
++
++ struct dc_dp_mst_stream_allocation *sa =
++ &proposed_table->stream_allocations[
++ proposed_table->stream_count];
++
++ sa->slot_count = mst_mgr->payloads[i].num_slots;
++ sa->vcp_id = mst_mgr->proposed_vcpis[i]->vcpi;
++ proposed_table->stream_count++;
++ }
+ }
+
+- /* Overwrite the old table */
+- *table = new_table;
++ mutex_unlock(&mst_mgr->payload_lock);
+ }
+
+ void dm_helpers_dp_update_branch_info(
+@@ -158,9 +170,11 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
+ bool enable)
+ {
+ struct amdgpu_dm_connector *aconnector;
+- struct drm_dp_mst_topology_state *mst_state;
+- struct drm_dp_mst_atomic_payload *payload;
++ struct dm_connector_state *dm_conn_state;
+ struct drm_dp_mst_topology_mgr *mst_mgr;
++ struct drm_dp_mst_port *mst_port;
++ bool ret;
++ u8 link_coding_cap = DP_8b_10b_ENCODING;
+
+ aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
+ /* Accessing the connector state is required for vcpi_slots allocation
+@@ -171,21 +185,40 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
+ if (!aconnector || !aconnector->mst_port)
+ return false;
+
++ dm_conn_state = to_dm_connector_state(aconnector->base.state);
++
+ mst_mgr = &aconnector->mst_port->mst_mgr;
+- mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);
++
++ if (!mst_mgr->mst_state)
++ return false;
++
++ mst_port = aconnector->port;
++
++#if defined(CONFIG_DRM_AMD_DC_DCN)
++ link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
++#endif
++
++ if (enable) {
++
++ ret = drm_dp_mst_allocate_vcpi(mst_mgr, mst_port,
++ dm_conn_state->pbn,
++ dm_conn_state->vcpi_slots);
++ if (!ret)
++ return false;
++
++ } else {
++ drm_dp_mst_reset_vcpi_slots(mst_mgr, mst_port);
++ }
+
+ /* It's OK for this to fail */
+- payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->port);
+- if (enable)
+- drm_dp_add_payload_part1(mst_mgr, mst_state, payload);
+- else
+- drm_dp_remove_payload(mst_mgr, mst_state, payload);
++ drm_dp_update_payload_part1(mst_mgr, (link_coding_cap == DP_CAP_ANSI_128B132B) ? 0:1);
+
+ /* mst_mgr->->payloads are VC payload notify MST branch using DPCD or
+ * AUX message. The sequence is slot 1-63 allocated sequence for each
+ * stream. AMD ASIC stream slot allocation should follow the same
+ * sequence. copy DRM MST allocation to dc */
+- fill_dc_mst_payload_table_from_drm(mst_state, aconnector, proposed_table);
++
++ fill_dc_mst_payload_table_from_drm(aconnector, proposed_table);
+
+ return true;
+ }
+@@ -242,9 +275,8 @@ bool dm_helpers_dp_mst_send_payload_allocation(
+ bool enable)
+ {
+ struct amdgpu_dm_connector *aconnector;
+- struct drm_dp_mst_topology_state *mst_state;
+ struct drm_dp_mst_topology_mgr *mst_mgr;
+- struct drm_dp_mst_atomic_payload *payload;
++ struct drm_dp_mst_port *mst_port;
+ enum mst_progress_status set_flag = MST_ALLOCATE_NEW_PAYLOAD;
+ enum mst_progress_status clr_flag = MST_CLEAR_ALLOCATED_PAYLOAD;
+
+@@ -253,16 +285,19 @@ bool dm_helpers_dp_mst_send_payload_allocation(
+ if (!aconnector || !aconnector->mst_port)
+ return false;
+
++ mst_port = aconnector->port;
++
+ mst_mgr = &aconnector->mst_port->mst_mgr;
+- mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);
+
+- payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->port);
++ if (!mst_mgr->mst_state)
++ return false;
++
+ if (!enable) {
+ set_flag = MST_CLEAR_ALLOCATED_PAYLOAD;
+ clr_flag = MST_ALLOCATE_NEW_PAYLOAD;
+ }
+
+- if (enable && drm_dp_add_payload_part2(mst_mgr, mst_state->base.state, payload)) {
++ if (drm_dp_update_payload_part2(mst_mgr)) {
+ amdgpu_dm_set_mst_status(&aconnector->mst_status,
+ set_flag, false);
+ } else {
+@@ -272,6 +307,9 @@ bool dm_helpers_dp_mst_send_payload_allocation(
+ clr_flag, false);
+ }
+
++ if (!enable)
++ drm_dp_mst_deallocate_vcpi(mst_mgr, mst_port);
++
+ return true;
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+index 6483ba266893..d57f1528a295 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+@@ -598,8 +598,15 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
+
+ dc_link_dp_get_max_link_enc_cap(aconnector->dc_link, &max_link_enc_cap);
+ aconnector->mst_mgr.cbs = &dm_mst_cbs;
+- drm_dp_mst_topology_mgr_init(&aconnector->mst_mgr, adev_to_drm(dm->adev),
+- &aconnector->dm_dp_aux.aux, 16, 4, aconnector->connector_id);
++ drm_dp_mst_topology_mgr_init(
++ &aconnector->mst_mgr,
++ adev_to_drm(dm->adev),
++ &aconnector->dm_dp_aux.aux,
++ 16,
++ 4,
++ max_link_enc_cap.lane_count,
++ drm_dp_bw_code_to_link_rate(max_link_enc_cap.link_rate),
++ aconnector->connector_id);
+
+ drm_connector_attach_dp_subconnector_property(&aconnector->base);
+ }
+@@ -703,13 +710,12 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
+ return dsc_config.bits_per_pixel;
+ }
+
+-static int increase_dsc_bpp(struct drm_atomic_state *state,
+- struct drm_dp_mst_topology_state *mst_state,
+- struct dc_link *dc_link,
+- struct dsc_mst_fairness_params *params,
+- struct dsc_mst_fairness_vars *vars,
+- int count,
+- int k)
++static bool increase_dsc_bpp(struct drm_atomic_state *state,
++ struct dc_link *dc_link,
++ struct dsc_mst_fairness_params *params,
++ struct dsc_mst_fairness_vars *vars,
++ int count,
++ int k)
+ {
+ int i;
+ bool bpp_increased[MAX_PIPES];
+@@ -717,10 +723,13 @@ static int increase_dsc_bpp(struct drm_atomic_state *state,
+ int min_initial_slack;
+ int next_index;
+ int remaining_to_increase = 0;
++ int pbn_per_timeslot;
+ int link_timeslots_used;
+ int fair_pbn_alloc;
+ int ret = 0;
+
++ pbn_per_timeslot = dm_mst_get_pbn_divider(dc_link);
++
+ for (i = 0; i < count; i++) {
+ if (vars[i + k].dsc_enabled) {
+ initial_slack[i] =
+@@ -751,17 +760,18 @@ static int increase_dsc_bpp(struct drm_atomic_state *state,
+ link_timeslots_used = 0;
+
+ for (i = 0; i < count; i++)
+- link_timeslots_used += DIV_ROUND_UP(vars[i + k].pbn, mst_state->pbn_div);
++ link_timeslots_used += DIV_ROUND_UP(vars[i + k].pbn, pbn_per_timeslot);
+
+- fair_pbn_alloc =
+- (63 - link_timeslots_used) / remaining_to_increase * mst_state->pbn_div;
++ fair_pbn_alloc = (63 - link_timeslots_used) / remaining_to_increase * pbn_per_timeslot;
+
+ if (initial_slack[next_index] > fair_pbn_alloc) {
+ vars[next_index].pbn += fair_pbn_alloc;
++
+ ret = drm_dp_atomic_find_time_slots(state,
+ params[next_index].port->mgr,
+ params[next_index].port,
+- vars[next_index].pbn);
++ vars[next_index].pbn,
++ pbn_per_timeslot);
+ if (ret < 0)
+ return ret;
+
+@@ -773,7 +783,8 @@ static int increase_dsc_bpp(struct drm_atomic_state *state,
+ ret = drm_dp_atomic_find_time_slots(state,
+ params[next_index].port->mgr,
+ params[next_index].port,
+- vars[next_index].pbn);
++ vars[next_index].pbn,
++ pbn_per_timeslot);
+ if (ret < 0)
+ return ret;
+ }
+@@ -782,7 +793,8 @@ static int increase_dsc_bpp(struct drm_atomic_state *state,
+ ret = drm_dp_atomic_find_time_slots(state,
+ params[next_index].port->mgr,
+ params[next_index].port,
+- vars[next_index].pbn);
++ vars[next_index].pbn,
++ pbn_per_timeslot);
+ if (ret < 0)
+ return ret;
+
+@@ -794,7 +806,8 @@ static int increase_dsc_bpp(struct drm_atomic_state *state,
+ ret = drm_dp_atomic_find_time_slots(state,
+ params[next_index].port->mgr,
+ params[next_index].port,
+- vars[next_index].pbn);
++ vars[next_index].pbn,
++ pbn_per_timeslot);
+ if (ret < 0)
+ return ret;
+ }
+@@ -850,10 +863,12 @@ static int try_disable_dsc(struct drm_atomic_state *state,
+ break;
+
+ vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps);
++
+ ret = drm_dp_atomic_find_time_slots(state,
+ params[next_index].port->mgr,
+ params[next_index].port,
+- vars[next_index].pbn);
++ vars[next_index].pbn,
++ dm_mst_get_pbn_divider(dc_link));
+ if (ret < 0)
+ return ret;
+
+@@ -863,10 +878,12 @@ static int try_disable_dsc(struct drm_atomic_state *state,
+ vars[next_index].bpp_x16 = 0;
+ } else {
+ vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps);
++
+ ret = drm_dp_atomic_find_time_slots(state,
+ params[next_index].port->mgr,
+ params[next_index].port,
+- vars[next_index].pbn);
++ vars[next_index].pbn,
++ dm_mst_get_pbn_divider(dc_link));
+ if (ret < 0)
+ return ret;
+ }
+@@ -877,31 +894,21 @@ static int try_disable_dsc(struct drm_atomic_state *state,
+ return 0;
+ }
+
+-static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
+- struct dc_state *dc_state,
+- struct dc_link *dc_link,
+- struct dsc_mst_fairness_vars *vars,
+- struct drm_dp_mst_topology_mgr *mgr,
+- int *link_vars_start_index)
++static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
++ struct dc_state *dc_state,
++ struct dc_link *dc_link,
++ struct dsc_mst_fairness_vars *vars,
++ int *link_vars_start_index)
+ {
++ int i, k, ret;
+ struct dc_stream_state *stream;
+ struct dsc_mst_fairness_params params[MAX_PIPES];
+ struct amdgpu_dm_connector *aconnector;
+- struct drm_dp_mst_topology_state *mst_state = drm_atomic_get_mst_topology_state(state, mgr);
+ int count = 0;
+- int i, k, ret;
+ bool debugfs_overwrite = false;
+
+ memset(params, 0, sizeof(params));
+
+- if (IS_ERR(mst_state))
+- return PTR_ERR(mst_state);
+-
+- mst_state->pbn_div = dm_mst_get_pbn_divider(dc_link);
+-#if defined(CONFIG_DRM_AMD_DC_DCN)
+- drm_dp_mst_update_slots(mst_state, dc_link_dp_mst_decide_link_encoding_format(dc_link));
+-#endif
+-
+ /* Set up params */
+ for (i = 0; i < dc_state->stream_count; i++) {
+ struct dc_dsc_policy dsc_policy = {0};
+@@ -961,7 +968,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
+ vars[i + k].dsc_enabled = false;
+ vars[i + k].bpp_x16 = 0;
+ ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port,
+- vars[i + k].pbn);
++ vars[i + k].pbn, dm_mst_get_pbn_divider(dc_link));
+ if (ret < 0)
+ return ret;
+ }
+@@ -980,7 +987,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
+ vars[i + k].dsc_enabled = true;
+ vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
+ ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
+- params[i].port, vars[i + k].pbn);
++ params[i].port, vars[i + k].pbn, dm_mst_get_pbn_divider(dc_link));
+ if (ret < 0)
+ return ret;
+ } else {
+@@ -988,7 +995,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
+ vars[i + k].dsc_enabled = false;
+ vars[i + k].bpp_x16 = 0;
+ ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
+- params[i].port, vars[i + k].pbn);
++ params[i].port, vars[i + k].pbn, dm_mst_get_pbn_divider(dc_link));
+ if (ret < 0)
+ return ret;
+ }
+@@ -998,7 +1005,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
+ return ret;
+
+ /* Optimize degree of compression */
+- ret = increase_dsc_bpp(state, mst_state, dc_link, params, vars, count, k);
++ ret = increase_dsc_bpp(state, dc_link, params, vars, count, k);
+ if (ret < 0)
+ return ret;
+
+@@ -1148,7 +1155,7 @@ int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
+ continue;
+
+ mst_mgr = aconnector->port->mgr;
+- ret = compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars, mst_mgr,
++ ret = compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars,
+ &link_vars_start_index);
+ if (ret != 0)
+ return ret;
+@@ -1206,7 +1213,7 @@ static int pre_compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
+ continue;
+
+ mst_mgr = aconnector->port->mgr;
+- ret = compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars, mst_mgr,
++ ret = compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars,
+ &link_vars_start_index);
+ if (ret != 0)
+ return ret;
+diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h
+index d1e91d31d151..0889c2a86733 100644
+--- a/drivers/gpu/drm/amd/display/include/link_service_types.h
++++ b/drivers/gpu/drm/amd/display/include/link_service_types.h
+@@ -252,9 +252,6 @@ union dpcd_training_lane_set {
+ * _ONLY_ be filled out from DM and then passed to DC, do NOT use these for _any_ kind of atomic
+ * state calculations in DM, or you will break something.
+ */
+-
+-struct drm_dp_mst_port;
+-
+ /* DP MST stream allocation (payload bandwidth number) */
+ struct dc_dp_mst_stream_allocation {
+ uint8_t vcp_id;
+diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
+index 51a46689cda7..95ff57d20216 100644
+--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
+@@ -68,7 +68,8 @@ static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
+ static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);
+
+ static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
+- int id, u8 start_slot, u8 num_slots);
++ int id,
++ struct drm_dp_payload *payload);
+
+ static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port,
+@@ -1234,6 +1235,57 @@ build_query_stream_enc_status(struct drm_dp_sideband_msg_tx *msg, u8 stream_id,
+ return 0;
+ }
+
++static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
++ struct drm_dp_vcpi *vcpi)
++{
++ int ret, vcpi_ret;
++
++ mutex_lock(&mgr->payload_lock);
++ ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
++ if (ret > mgr->max_payloads) {
++ ret = -EINVAL;
++ drm_dbg_kms(mgr->dev, "out of payload ids %d\n", ret);
++ goto out_unlock;
++ }
++
++ vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
++ if (vcpi_ret > mgr->max_payloads) {
++ ret = -EINVAL;
++ drm_dbg_kms(mgr->dev, "out of vcpi ids %d\n", ret);
++ goto out_unlock;
++ }
++
++ set_bit(ret, &mgr->payload_mask);
++ set_bit(vcpi_ret, &mgr->vcpi_mask);
++ vcpi->vcpi = vcpi_ret + 1;
++ mgr->proposed_vcpis[ret - 1] = vcpi;
++out_unlock:
++ mutex_unlock(&mgr->payload_lock);
++ return ret;
++}
++
++static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
++ int vcpi)
++{
++ int i;
++
++ if (vcpi == 0)
++ return;
++
++ mutex_lock(&mgr->payload_lock);
++ drm_dbg_kms(mgr->dev, "putting payload %d\n", vcpi);
++ clear_bit(vcpi - 1, &mgr->vcpi_mask);
++
++ for (i = 0; i < mgr->max_payloads; i++) {
++ if (mgr->proposed_vcpis[i] &&
++ mgr->proposed_vcpis[i]->vcpi == vcpi) {
++ mgr->proposed_vcpis[i] = NULL;
++ clear_bit(i + 1, &mgr->payload_mask);
++ }
++ }
++ mutex_unlock(&mgr->payload_lock);
++}
++
+ static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_sideband_msg_tx *txmsg)
+ {
+@@ -1686,7 +1738,7 @@ drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port) {}
+ #define save_port_topology_ref(port, type)
+ #endif
+
+-struct drm_dp_mst_atomic_payload *
++static struct drm_dp_mst_atomic_payload *
+ drm_atomic_get_mst_payload_state(struct drm_dp_mst_topology_state *state,
+ struct drm_dp_mst_port *port)
+ {
+@@ -1698,7 +1750,6 @@ drm_atomic_get_mst_payload_state(struct drm_dp_mst_topology_state *state,
+
+ return NULL;
+ }
+-EXPORT_SYMBOL(drm_atomic_get_mst_payload_state);
+
+ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
+ {
+@@ -3201,8 +3252,6 @@ int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port,
+ struct drm_dp_query_stream_enc_status_ack_reply *status)
+ {
+- struct drm_dp_mst_topology_state *state;
+- struct drm_dp_mst_atomic_payload *payload;
+ struct drm_dp_sideband_msg_tx *txmsg;
+ u8 nonce[7];
+ int ret;
+@@ -3219,10 +3268,6 @@ int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
+
+ get_random_bytes(nonce, sizeof(nonce));
+
+- drm_modeset_lock(&mgr->base.lock, NULL);
+- state = to_drm_dp_mst_topology_state(mgr->base.state);
+- payload = drm_atomic_get_mst_payload_state(state, port);
+-
+ /*
+ * "Source device targets the QUERY_STREAM_ENCRYPTION_STATUS message
+ * transaction at the MST Branch device directly connected to the
+@@ -3230,7 +3275,7 @@ int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
+ */
+ txmsg->dst = mgr->mst_primary;
+
+- build_query_stream_enc_status(txmsg, payload->vcpi, nonce);
++ build_query_stream_enc_status(txmsg, port->vcpi.vcpi, nonce);
+
+ drm_dp_queue_down_tx(mgr, txmsg);
+
+@@ -3247,7 +3292,6 @@ int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
+ memcpy(status, &txmsg->reply.u.enc_status, sizeof(*status));
+
+ out:
+- drm_modeset_unlock(&mgr->base.lock);
+ drm_dp_mst_topology_put_port(port);
+ out_get_port:
+ kfree(txmsg);
+@@ -3256,162 +3300,238 @@ int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
+ EXPORT_SYMBOL(drm_dp_send_query_stream_enc_status);
+
+ static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
+- struct drm_dp_mst_atomic_payload *payload)
++ int id,
++ struct drm_dp_payload *payload)
+ {
+- return drm_dp_dpcd_write_payload(mgr, payload->vcpi, payload->vc_start_slot,
+- payload->time_slots);
++ int ret;
++
++ ret = drm_dp_dpcd_write_payload(mgr, id, payload);
++ if (ret < 0) {
++ payload->payload_state = 0;
++ return ret;
++ }
++ payload->payload_state = DP_PAYLOAD_LOCAL;
++ return 0;
+ }
+
+ static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
+- struct drm_dp_mst_atomic_payload *payload)
++ struct drm_dp_mst_port *port,
++ int id,
++ struct drm_dp_payload *payload)
+ {
+ int ret;
+- struct drm_dp_mst_port *port = drm_dp_mst_topology_get_port_validated(mgr, payload->port);
+-
+- if (!port)
+- return -EIO;
+
+- ret = drm_dp_payload_send_msg(mgr, port, payload->vcpi, payload->pbn);
+- drm_dp_mst_topology_put_port(port);
++ ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
++ if (ret < 0)
++ return ret;
++ payload->payload_state = DP_PAYLOAD_REMOTE;
+ return ret;
+ }
+
+ static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
+- struct drm_dp_mst_topology_state *mst_state,
+- struct drm_dp_mst_atomic_payload *payload)
++ struct drm_dp_mst_port *port,
++ int id,
++ struct drm_dp_payload *payload)
+ {
+ drm_dbg_kms(mgr->dev, "\n");
+-
+ /* it's okay for these to fail */
+- drm_dp_payload_send_msg(mgr, payload->port, payload->vcpi, 0);
+- drm_dp_dpcd_write_payload(mgr, payload->vcpi, payload->vc_start_slot, 0);
++ if (port) {
++ drm_dp_payload_send_msg(mgr, port, id, 0);
++ }
+
++ drm_dp_dpcd_write_payload(mgr, id, payload);
++ payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;
++ return 0;
++}
++
++static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
++ int id,
++ struct drm_dp_payload *payload)
++{
++ payload->payload_state = 0;
+ return 0;
+ }
+
+ /**
+- * drm_dp_add_payload_part1() - Execute payload update part 1
+- * @mgr: Manager to use.
+- * @mst_state: The MST atomic state
+- * @payload: The payload to write
++ * drm_dp_update_payload_part1() - Execute payload update part 1
++ * @mgr: manager to use.
++ * @start_slot: this is the cur slot
++ *
++ * NOTE: start_slot is a temporary workaround for non-atomic drivers,
++ * this will be removed when non-atomic mst helpers are moved out of the helper
+ *
+- * Determines the starting time slot for the given payload, and programs the VCPI for this payload
+- * into hardware. After calling this, the driver should generate ACT and payload packets.
++ * This iterates over all proposed virtual channels, and tries to
++ * allocate space in the link for them. For 0->slots transitions,
++ * this step just writes the VCPI to the MST device. For slots->0
++ * transitions, this writes the updated VCPIs and removes the
++ * remote VC payloads.
+ *
+- * Returns: 0 on success, error code on failure. In the event that this fails,
+- * @payload.vc_start_slot will also be set to -1.
++ * after calling this the driver should generate ACT and payload
++ * packets.
+ */
+-int drm_dp_add_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
+- struct drm_dp_mst_topology_state *mst_state,
+- struct drm_dp_mst_atomic_payload *payload)
++int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr, int start_slot)
+ {
++ struct drm_dp_payload req_payload;
+ struct drm_dp_mst_port *port;
+- int ret;
++ int i, j;
++ int cur_slots = start_slot;
++ bool skip;
+
+- port = drm_dp_mst_topology_get_port_validated(mgr, payload->port);
+- if (!port)
+- return 0;
++ mutex_lock(&mgr->payload_lock);
++ for (i = 0; i < mgr->max_payloads; i++) {
++ struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
++ struct drm_dp_payload *payload = &mgr->payloads[i];
++ bool put_port = false;
+
+- if (mgr->payload_count == 0)
+- mgr->next_start_slot = mst_state->start_slot;
++ /* solve the current payloads - compare to the hw ones
++ - update the hw view */
++ req_payload.start_slot = cur_slots;
++ if (vcpi) {
++ port = container_of(vcpi, struct drm_dp_mst_port,
++ vcpi);
+
+- payload->vc_start_slot = mgr->next_start_slot;
++ mutex_lock(&mgr->lock);
++ skip = !drm_dp_mst_port_downstream_of_branch(port, mgr->mst_primary);
++ mutex_unlock(&mgr->lock);
+
+- ret = drm_dp_create_payload_step1(mgr, payload);
+- drm_dp_mst_topology_put_port(port);
+- if (ret < 0) {
+- drm_warn(mgr->dev, "Failed to create MST payload for port %p: %d\n",
+- payload->port, ret);
+- payload->vc_start_slot = -1;
+- return ret;
+- }
++ if (skip) {
++ drm_dbg_kms(mgr->dev,
++ "Virtual channel %d is not in current topology\n",
++ i);
++ continue;
++ }
++ /* Validated ports don't matter if we're releasing
++ * VCPI
++ */
++ if (vcpi->num_slots) {
++ port = drm_dp_mst_topology_get_port_validated(
++ mgr, port);
++ if (!port) {
++ if (vcpi->num_slots == payload->num_slots) {
++ cur_slots += vcpi->num_slots;
++ payload->start_slot = req_payload.start_slot;
++ continue;
++ } else {
++ drm_dbg_kms(mgr->dev,
++ "Fail:set payload to invalid sink");
++ mutex_unlock(&mgr->payload_lock);
++ return -EINVAL;
++ }
++ }
++ put_port = true;
++ }
+
+- mgr->payload_count++;
+- mgr->next_start_slot += payload->time_slots;
++ req_payload.num_slots = vcpi->num_slots;
++ req_payload.vcpi = vcpi->vcpi;
++ } else {
++ port = NULL;
++ req_payload.num_slots = 0;
++ }
+
+- return 0;
+-}
+-EXPORT_SYMBOL(drm_dp_add_payload_part1);
++ payload->start_slot = req_payload.start_slot;
++ /* work out what is required to happen with this payload */
++ if (payload->num_slots != req_payload.num_slots) {
++
++ /* need to push an update for this payload */
++ if (req_payload.num_slots) {
++ drm_dp_create_payload_step1(mgr, vcpi->vcpi,
++ &req_payload);
++ payload->num_slots = req_payload.num_slots;
++ payload->vcpi = req_payload.vcpi;
++
++ } else if (payload->num_slots) {
++ payload->num_slots = 0;
++ drm_dp_destroy_payload_step1(mgr, port,
++ payload->vcpi,
++ payload);
++ req_payload.payload_state =
++ payload->payload_state;
++ payload->start_slot = 0;
++ }
++ payload->payload_state = req_payload.payload_state;
++ }
++ cur_slots += req_payload.num_slots;
+
+-/**
+- * drm_dp_remove_payload() - Remove an MST payload
+- * @mgr: Manager to use.
+- * @mst_state: The MST atomic state
+- * @payload: The payload to write
+- *
+- * Removes a payload from an MST topology if it was successfully assigned a start slot. Also updates
+- * the starting time slots of all other payloads which would have been shifted towards the start of
+- * the VC table as a result. After calling this, the driver should generate ACT and payload packets.
+- */
+-void drm_dp_remove_payload(struct drm_dp_mst_topology_mgr *mgr,
+- struct drm_dp_mst_topology_state *mst_state,
+- struct drm_dp_mst_atomic_payload *payload)
+-{
+- struct drm_dp_mst_atomic_payload *pos;
+- bool send_remove = false;
++ if (put_port)
++ drm_dp_mst_topology_put_port(port);
++ }
+
+- /* We failed to make the payload, so nothing to do */
+- if (payload->vc_start_slot == -1)
+- return;
++ for (i = 0; i < mgr->max_payloads; /* do nothing */) {
++ if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL) {
++ i++;
++ continue;
++ }
+
+- mutex_lock(&mgr->lock);
+- send_remove = drm_dp_mst_port_downstream_of_branch(payload->port, mgr->mst_primary);
+- mutex_unlock(&mgr->lock);
++ drm_dbg_kms(mgr->dev, "removing payload %d\n", i);
++ for (j = i; j < mgr->max_payloads - 1; j++) {
++ mgr->payloads[j] = mgr->payloads[j + 1];
++ mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
+
+- if (send_remove)
+- drm_dp_destroy_payload_step1(mgr, mst_state, payload);
+- else
+- drm_dbg_kms(mgr->dev, "Payload for VCPI %d not in topology, not sending remove\n",
+- payload->vcpi);
++ if (mgr->proposed_vcpis[j] &&
++ mgr->proposed_vcpis[j]->num_slots) {
++ set_bit(j + 1, &mgr->payload_mask);
++ } else {
++ clear_bit(j + 1, &mgr->payload_mask);
++ }
++ }
+
+- list_for_each_entry(pos, &mst_state->payloads, next) {
+- if (pos != payload && pos->vc_start_slot > payload->vc_start_slot)
+- pos->vc_start_slot -= payload->time_slots;
++ memset(&mgr->payloads[mgr->max_payloads - 1], 0,
++ sizeof(struct drm_dp_payload));
++ mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
++ clear_bit(mgr->max_payloads, &mgr->payload_mask);
+ }
+- payload->vc_start_slot = -1;
++ mutex_unlock(&mgr->payload_lock);
+
+- mgr->payload_count--;
+- mgr->next_start_slot -= payload->time_slots;
++ return 0;
+ }
+-EXPORT_SYMBOL(drm_dp_remove_payload);
++EXPORT_SYMBOL(drm_dp_update_payload_part1);
+
+ /**
+- * drm_dp_add_payload_part2() - Execute payload update part 2
+- * @mgr: Manager to use.
+- * @state: The global atomic state
+- * @payload: The payload to update
+- *
+- * If @payload was successfully assigned a starting time slot by drm_dp_add_payload_part1(), this
+- * function will send the sideband messages to finish allocating this payload.
++ * drm_dp_update_payload_part2() - Execute payload update part 2
++ * @mgr: manager to use.
+ *
+- * Returns: 0 on success, negative error code on failure.
++ * This iterates over all proposed virtual channels, and tries to
++ * allocate space in the link for them. For 0->slots transitions,
++ * this step writes the remote VC payload commands. For slots->0
++ * this just resets some internal state.
+ */
+-int drm_dp_add_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
+- struct drm_atomic_state *state,
+- struct drm_dp_mst_atomic_payload *payload)
++int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
+ {
++ struct drm_dp_mst_port *port;
++ int i;
+ int ret = 0;
++ bool skip;
+
+- /* Skip failed payloads */
+- if (payload->vc_start_slot == -1) {
+- drm_dbg_kms(state->dev, "Part 1 of payload creation for %s failed, skipping part 2\n",
+- payload->port->connector->name);
+- return -EIO;
+- }
++ mutex_lock(&mgr->payload_lock);
++ for (i = 0; i < mgr->max_payloads; i++) {
+
+- ret = drm_dp_create_payload_step2(mgr, payload);
+- if (ret < 0) {
+- if (!payload->delete)
+- drm_err(mgr->dev, "Step 2 of creating MST payload for %p failed: %d\n",
+- payload->port, ret);
+- else
+- drm_dbg_kms(mgr->dev, "Step 2 of removing MST payload for %p failed: %d\n",
+- payload->port, ret);
+- }
++ if (!mgr->proposed_vcpis[i])
++ continue;
+
+- return ret;
++ port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
++
++ mutex_lock(&mgr->lock);
++ skip = !drm_dp_mst_port_downstream_of_branch(port, mgr->mst_primary);
++ mutex_unlock(&mgr->lock);
++
++ if (skip)
++ continue;
++
++ drm_dbg_kms(mgr->dev, "payload %d %d\n", i, mgr->payloads[i].payload_state);
++ if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
++ ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
++ } else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
++ ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
++ }
++ if (ret) {
++ mutex_unlock(&mgr->payload_lock);
++ return ret;
++ }
++ }
++ mutex_unlock(&mgr->payload_lock);
++ return 0;
+ }
+-EXPORT_SYMBOL(drm_dp_add_payload_part2);
++EXPORT_SYMBOL(drm_dp_update_payload_part2);
+
+ static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port,
+@@ -3591,6 +3711,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
+ int ret = 0;
+ struct drm_dp_mst_branch *mstb = NULL;
+
++ mutex_lock(&mgr->payload_lock);
+ mutex_lock(&mgr->lock);
+ if (mst_state == mgr->mst_state)
+ goto out_unlock;
+@@ -3598,6 +3719,10 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
+ mgr->mst_state = mst_state;
+ /* set the device into MST mode */
+ if (mst_state) {
++ struct drm_dp_payload reset_pay;
++ int lane_count;
++ int link_rate;
++
+ WARN_ON(mgr->mst_primary);
+
+ /* get dpcd info */
+@@ -3608,6 +3733,16 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
+ goto out_unlock;
+ }
+
++ lane_count = min_t(int, mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK, mgr->max_lane_count);
++ link_rate = min_t(int, drm_dp_bw_code_to_link_rate(mgr->dpcd[1]), mgr->max_link_rate);
++ mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr,
++ link_rate,
++ lane_count);
++ if (mgr->pbn_div == 0) {
++ ret = -EINVAL;
++ goto out_unlock;
++ }
++
+ /* add initial branch device at LCT 1 */
+ mstb = drm_dp_add_mst_branch_device(1, NULL);
+ if (mstb == NULL) {
+@@ -3627,8 +3762,9 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
+ if (ret < 0)
+ goto out_unlock;
+
+- /* Write reset payload */
+- drm_dp_dpcd_write_payload(mgr, 0, 0, 0x3f);
++ reset_pay.start_slot = 0;
++ reset_pay.num_slots = 0x3f;
++ drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
+
+ queue_work(system_long_wq, &mgr->work);
+
+@@ -3640,11 +3776,19 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
+ /* this can fail if the device is gone */
+ drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
+ ret = 0;
++ memset(mgr->payloads, 0,
++ mgr->max_payloads * sizeof(mgr->payloads[0]));
++ memset(mgr->proposed_vcpis, 0,
++ mgr->max_payloads * sizeof(mgr->proposed_vcpis[0]));
++ mgr->payload_mask = 0;
++ set_bit(0, &mgr->payload_mask);
++ mgr->vcpi_mask = 0;
+ mgr->payload_id_table_cleared = false;
+ }
+
+ out_unlock:
+ mutex_unlock(&mgr->lock);
++ mutex_unlock(&mgr->payload_lock);
+ if (mstb)
+ drm_dp_mst_topology_put_mstb(mstb);
+ return ret;
+@@ -4163,18 +4307,62 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_
+ }
+ EXPORT_SYMBOL(drm_dp_mst_get_edid);
+
++/**
++ * drm_dp_find_vcpi_slots() - Find time slots for this PBN value
++ * @mgr: manager to use
++ * @pbn: payload bandwidth to convert into slots.
++ *
++ * Calculate the number of time slots that will be required for the given PBN
++ * value. This function is deprecated, and should not be used in atomic
++ * drivers.
++ *
++ * RETURNS:
++ * The total slots required for this port, or error.
++ */
++int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
++ int pbn)
++{
++ int num_slots;
++
++ num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
++
++ /* max. time slots - one slot for MTP header */
++ if (num_slots > 63)
++ return -ENOSPC;
++ return num_slots;
++}
++EXPORT_SYMBOL(drm_dp_find_vcpi_slots);
++
++static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
++ struct drm_dp_vcpi *vcpi, int pbn, int slots)
++{
++ int ret;
++
++ vcpi->pbn = pbn;
++ vcpi->aligned_pbn = slots * mgr->pbn_div;
++ vcpi->num_slots = slots;
++
++ ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
++ if (ret < 0)
++ return ret;
++ return 0;
++}
++
+ /**
+ * drm_dp_atomic_find_time_slots() - Find and add time slots to the state
+ * @state: global atomic state
+ * @mgr: MST topology manager for the port
+ * @port: port to find time slots for
+ * @pbn: bandwidth required for the mode in PBN
++ * @pbn_div: divider for DSC mode that takes FEC into account
+ *
+- * Allocates time slots to @port, replacing any previous time slot allocations it may
+- * have had. Any atomic drivers which support MST must call this function in
+- * their &drm_encoder_helper_funcs.atomic_check() callback unconditionally to
+- * change the current time slot allocation for the new state, and ensure the MST
+- * atomic state is added whenever the state of payloads in the topology changes.
++ * Allocates time slots to @port, replacing any previous timeslot allocations it
++ * may have had. Any atomic drivers which support MST must call this function
++ * in their &drm_encoder_helper_funcs.atomic_check() callback to change the
++ * current timeslot allocation for the new state, but only when
++ * &drm_crtc_state.mode_changed or &drm_crtc_state.connectors_changed is set
++ * to ensure compatibility with userspace applications that still use the
++ * legacy modesetting UAPI.
+ *
+ * Allocations set by this function are not checked against the bandwidth
+ * restraints of @mgr until the driver calls drm_dp_mst_atomic_check().
+@@ -4193,7 +4381,8 @@ EXPORT_SYMBOL(drm_dp_mst_get_edid);
+ */
+ int drm_dp_atomic_find_time_slots(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_mgr *mgr,
+- struct drm_dp_mst_port *port, int pbn)
++ struct drm_dp_mst_port *port, int pbn,
++ int pbn_div)
+ {
+ struct drm_dp_mst_topology_state *topology_state;
+ struct drm_dp_mst_atomic_payload *payload = NULL;
+@@ -4226,7 +4415,10 @@ int drm_dp_atomic_find_time_slots(struct drm_atomic_state *state,
+ }
+ }
+
+- req_slots = DIV_ROUND_UP(pbn, topology_state->pbn_div);
++ if (pbn_div <= 0)
++ pbn_div = mgr->pbn_div;
++
++ req_slots = DIV_ROUND_UP(pbn, pbn_div);
+
+ drm_dbg_atomic(mgr->dev, "[CONNECTOR:%d:%s] [MST PORT:%p] TU %d -> %d\n",
+ port->connector->base.id, port->connector->name,
+@@ -4235,7 +4427,7 @@ int drm_dp_atomic_find_time_slots(struct drm_atomic_state *state,
+ port->connector->base.id, port->connector->name,
+ port, prev_bw, pbn);
+
+- /* Add the new allocation to the state, note the VCPI isn't assigned until the end */
++ /* Add the new allocation to the state */
+ if (!payload) {
+ payload = kzalloc(sizeof(*payload), GFP_KERNEL);
+ if (!payload)
+@@ -4243,7 +4435,6 @@ int drm_dp_atomic_find_time_slots(struct drm_atomic_state *state,
+
+ drm_dp_mst_get_port_malloc(port);
+ payload->port = port;
+- payload->vc_start_slot = -1;
+ list_add(&payload->next, &topology_state->payloads);
+ }
+ payload->time_slots = req_slots;
+@@ -4260,12 +4451,10 @@ EXPORT_SYMBOL(drm_dp_atomic_find_time_slots);
+ * @port: The port to release the time slots from
+ *
+ * Releases any time slots that have been allocated to a port in the atomic
+- * state. Any atomic drivers which support MST must call this function
+- * unconditionally in their &drm_connector_helper_funcs.atomic_check() callback.
+- * This helper will check whether time slots would be released by the new state and
+- * respond accordingly, along with ensuring the MST state is always added to the
+- * atomic state whenever a new state would modify the state of payloads on the
+- * topology.
++ * state. Any atomic drivers which support MST must call this function in
++ * their &drm_connector_helper_funcs.atomic_check() callback when the
++ * connector will no longer have VCPI allocated (e.g. because its CRTC was
++ * removed) when it had VCPI allocated in the previous atomic state.
+ *
+ * It is OK to call this even if @port has been removed from the system.
+ * Additionally, it is OK to call this function multiple times on the same
+@@ -4330,7 +4519,6 @@ int drm_dp_atomic_release_time_slots(struct drm_atomic_state *state,
+ drm_dp_mst_put_port_malloc(port);
+ payload->pbn = 0;
+ payload->delete = true;
+- topology_state->payload_mask &= ~BIT(payload->vcpi - 1);
+ }
+
+ return 0;
+@@ -4381,8 +4569,7 @@ int drm_dp_mst_atomic_setup_commit(struct drm_atomic_state *state)
+ EXPORT_SYMBOL(drm_dp_mst_atomic_setup_commit);
+
+ /**
+- * drm_dp_mst_atomic_wait_for_dependencies() - Wait for all pending commits on MST topologies,
+- * prepare new MST state for commit
++ * drm_dp_mst_atomic_wait_for_dependencies() - Wait for all pending commits on MST topologies
+ * @state: global atomic state
+ *
+ * Goes through any MST topologies in this atomic state, and waits for any pending commits which
+@@ -4400,30 +4587,17 @@ EXPORT_SYMBOL(drm_dp_mst_atomic_setup_commit);
+ */
+ void drm_dp_mst_atomic_wait_for_dependencies(struct drm_atomic_state *state)
+ {
+- struct drm_dp_mst_topology_state *old_mst_state, *new_mst_state;
++ struct drm_dp_mst_topology_state *old_mst_state;
+ struct drm_dp_mst_topology_mgr *mgr;
+- struct drm_dp_mst_atomic_payload *old_payload, *new_payload;
+ int i, j, ret;
+
+- for_each_oldnew_mst_mgr_in_state(state, mgr, old_mst_state, new_mst_state, i) {
++ for_each_old_mst_mgr_in_state(state, mgr, old_mst_state, i) {
+ for (j = 0; j < old_mst_state->num_commit_deps; j++) {
+ ret = drm_crtc_commit_wait(old_mst_state->commit_deps[j]);
+ if (ret < 0)
+ drm_err(state->dev, "Failed to wait for %s: %d\n",
+ old_mst_state->commit_deps[j]->crtc->name, ret);
+ }
+-
+- /* Now that previous state is committed, it's safe to copy over the start slot
+- * assignments
+- */
+- list_for_each_entry(old_payload, &old_mst_state->payloads, next) {
+- if (old_payload->delete)
+- continue;
+-
+- new_payload = drm_atomic_get_mst_payload_state(new_mst_state,
+- old_payload->port);
+- new_payload->vc_start_slot = old_payload->vc_start_slot;
+- }
+ }
+ }
+ EXPORT_SYMBOL(drm_dp_mst_atomic_wait_for_dependencies);
+@@ -4508,8 +4682,119 @@ void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_
+ }
+ EXPORT_SYMBOL(drm_dp_mst_update_slots);
+
++/**
++ * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
++ * @mgr: manager for this port
++ * @port: port to allocate a virtual channel for.
++ * @pbn: payload bandwidth number to request
++ * @slots: returned number of slots for this PBN.
++ */
++bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
++ struct drm_dp_mst_port *port, int pbn, int slots)
++{
++ int ret;
++
++ if (slots < 0)
++ return false;
++
++ port = drm_dp_mst_topology_get_port_validated(mgr, port);
++ if (!port)
++ return false;
++
++ if (port->vcpi.vcpi > 0) {
++ drm_dbg_kms(mgr->dev,
++ "payload: vcpi %d already allocated for pbn %d - requested pbn %d\n",
++ port->vcpi.vcpi, port->vcpi.pbn, pbn);
++ if (pbn == port->vcpi.pbn) {
++ drm_dp_mst_topology_put_port(port);
++ return true;
++ }
++ }
++
++ ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots);
++ if (ret) {
++ drm_dbg_kms(mgr->dev, "failed to init time slots=%d ret=%d\n",
++ DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
++ drm_dp_mst_topology_put_port(port);
++ goto out;
++ }
++ drm_dbg_kms(mgr->dev, "initing vcpi for pbn=%d slots=%d\n", pbn, port->vcpi.num_slots);
++
++ /* Keep port allocated until its payload has been removed */
++ drm_dp_mst_get_port_malloc(port);
++ drm_dp_mst_topology_put_port(port);
++ return true;
++out:
++ return false;
++}
++EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
++
++int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
++{
++ int slots = 0;
++
++ port = drm_dp_mst_topology_get_port_validated(mgr, port);
++ if (!port)
++ return slots;
++
++ slots = port->vcpi.num_slots;
++ drm_dp_mst_topology_put_port(port);
++ return slots;
++}
++EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
++
++/**
++ * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
++ * @mgr: manager for this port
++ * @port: unverified pointer to a port.
++ *
++ * This just resets the number of slots for the ports VCPI for later programming.
++ */
++void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
++{
++ /*
++ * A port with VCPI will remain allocated until its VCPI is
++ * released, no verified ref needed
++ */
++
++ port->vcpi.num_slots = 0;
++}
++EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
++
++/**
++ * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
++ * @mgr: manager for this port
++ * @port: port to deallocate vcpi for
++ *
++ * This can be called unconditionally, regardless of whether
++ * drm_dp_mst_allocate_vcpi() succeeded or not.
++ */
++void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
++ struct drm_dp_mst_port *port)
++{
++ bool skip;
++
++ if (!port->vcpi.vcpi)
++ return;
++
++ mutex_lock(&mgr->lock);
++ skip = !drm_dp_mst_port_downstream_of_branch(port, mgr->mst_primary);
++ mutex_unlock(&mgr->lock);
++
++ if (skip)
++ return;
++
++ drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
++ port->vcpi.num_slots = 0;
++ port->vcpi.pbn = 0;
++ port->vcpi.aligned_pbn = 0;
++ port->vcpi.vcpi = 0;
++ drm_dp_mst_put_port_malloc(port);
++}
++EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
++
+ static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
+- int id, u8 start_slot, u8 num_slots)
++ int id, struct drm_dp_payload *payload)
+ {
+ u8 payload_alloc[3], status;
+ int ret;
+@@ -4519,8 +4804,8 @@ static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
+ DP_PAYLOAD_TABLE_UPDATED);
+
+ payload_alloc[0] = id;
+- payload_alloc[1] = start_slot;
+- payload_alloc[2] = num_slots;
++ payload_alloc[1] = payload->start_slot;
++ payload_alloc[2] = payload->num_slots;
+
+ ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
+ if (ret != 3) {
+@@ -4735,9 +5020,8 @@ static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
+ void drm_dp_mst_dump_topology(struct seq_file *m,
+ struct drm_dp_mst_topology_mgr *mgr)
+ {
+- struct drm_dp_mst_topology_state *state;
+- struct drm_dp_mst_atomic_payload *payload;
+- int i, ret;
++ int i;
++ struct drm_dp_mst_port *port;
+
+ mutex_lock(&mgr->lock);
+ if (mgr->mst_primary)
+@@ -4746,35 +5030,36 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
+ /* dump VCPIs */
+ mutex_unlock(&mgr->lock);
+
+- ret = drm_modeset_lock_single_interruptible(&mgr->base.lock);
+- if (ret < 0)
+- return;
++ mutex_lock(&mgr->payload_lock);
++ seq_printf(m, "\n*** VCPI Info ***\n");
++ seq_printf(m, "payload_mask: %lx, vcpi_mask: %lx, max_payloads: %d\n", mgr->payload_mask, mgr->vcpi_mask, mgr->max_payloads);
+
+- state = to_drm_dp_mst_topology_state(mgr->base.state);
+- seq_printf(m, "\n*** Atomic state info ***\n");
+- seq_printf(m, "payload_mask: %x, max_payloads: %d, start_slot: %u, pbn_div: %d\n",
+- state->payload_mask, mgr->max_payloads, state->start_slot, state->pbn_div);
+-
+- seq_printf(m, "\n| idx | port | vcpi | slots | pbn | dsc | sink name |\n");
++ seq_printf(m, "\n| idx | port # | vcp_id | # slots | sink name |\n");
+ for (i = 0; i < mgr->max_payloads; i++) {
+- list_for_each_entry(payload, &state->payloads, next) {
++ if (mgr->proposed_vcpis[i]) {
+ char name[14];
+
+- if (payload->vcpi != i || payload->delete)
+- continue;
+-
+- fetch_monitor_name(mgr, payload->port, name, sizeof(name));
+- seq_printf(m, " %5d %6d %6d %02d - %02d %5d %5s %19s\n",
++ port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
++ fetch_monitor_name(mgr, port, name, sizeof(name));
++ seq_printf(m, "%10d%10d%10d%10d%20s\n",
+ i,
+- payload->port->port_num,
+- payload->vcpi,
+- payload->vc_start_slot,
+- payload->vc_start_slot + payload->time_slots - 1,
+- payload->pbn,
+- payload->dsc_enabled ? "Y" : "N",
++ port->port_num,
++ port->vcpi.vcpi,
++ port->vcpi.num_slots,
+ (*name != 0) ? name : "Unknown");
+- }
++ } else
++ seq_printf(m, "%6d - Unused\n", i);
++ }
++ seq_printf(m, "\n*** Payload Info ***\n");
++ seq_printf(m, "| idx | state | start slot | # slots |\n");
++ for (i = 0; i < mgr->max_payloads; i++) {
++ seq_printf(m, "%10d%10d%15d%10d\n",
++ i,
++ mgr->payloads[i].payload_state,
++ mgr->payloads[i].start_slot,
++ mgr->payloads[i].num_slots);
+ }
++ mutex_unlock(&mgr->payload_lock);
+
+ seq_printf(m, "\n*** DPCD Info ***\n");
+ mutex_lock(&mgr->lock);
+@@ -4820,7 +5105,7 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
+
+ out:
+ mutex_unlock(&mgr->lock);
+- drm_modeset_unlock(&mgr->base.lock);
++
+ }
+ EXPORT_SYMBOL(drm_dp_mst_dump_topology);
+
+@@ -5141,22 +5426,9 @@ drm_dp_mst_atomic_check_payload_alloc_limits(struct drm_dp_mst_topology_mgr *mgr
+ mgr, mst_state, mgr->max_payloads);
+ return -EINVAL;
+ }
+-
+- /* Assign a VCPI */
+- if (!payload->vcpi) {
+- payload->vcpi = ffz(mst_state->payload_mask) + 1;
+- drm_dbg_atomic(mgr->dev, "[MST PORT:%p] assigned VCPI #%d\n",
+- payload->port, payload->vcpi);
+- mst_state->payload_mask |= BIT(payload->vcpi - 1);
+- }
+ }
+-
+- if (!payload_count)
+- mst_state->pbn_div = 0;
+-
+- drm_dbg_atomic(mgr->dev, "[MST MGR:%p] mst state %p TU pbn_div=%d avail=%d used=%d\n",
+- mgr, mst_state, mst_state->pbn_div, avail_slots,
+- mst_state->total_avail_slots - avail_slots);
++ drm_dbg_atomic(mgr->dev, "[MST MGR:%p] mst state %p TU avail=%d used=%d\n",
++ mgr, mst_state, avail_slots, mst_state->total_avail_slots - avail_slots);
+
+ return 0;
+ }
+@@ -5227,6 +5499,7 @@ EXPORT_SYMBOL(drm_dp_mst_add_affected_dsc_crtcs);
+ * @state: Pointer to the new drm_atomic_state
+ * @port: Pointer to the affected MST Port
+ * @pbn: Newly recalculated bw required for link with DSC enabled
++ * @pbn_div: Divider to calculate correct number of pbn per slot
+ * @enable: Boolean flag to enable or disable DSC on the port
+ *
+ * This function enables DSC on the given Port
+@@ -5237,7 +5510,8 @@ EXPORT_SYMBOL(drm_dp_mst_add_affected_dsc_crtcs);
+ */
+ int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
+ struct drm_dp_mst_port *port,
+- int pbn, bool enable)
++ int pbn, int pbn_div,
++ bool enable)
+ {
+ struct drm_dp_mst_topology_state *mst_state;
+ struct drm_dp_mst_atomic_payload *payload;
+@@ -5263,7 +5537,7 @@ int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
+ }
+
+ if (enable) {
+- time_slots = drm_dp_atomic_find_time_slots(state, port->mgr, port, pbn);
++ time_slots = drm_dp_atomic_find_time_slots(state, port->mgr, port, pbn, pbn_div);
+ drm_dbg_atomic(state->dev,
+ "[MST PORT:%p] Enabling DSC flag, reallocating %d time slots on the port\n",
+ port, time_slots);
+@@ -5276,7 +5550,6 @@ int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
+ return time_slots;
+ }
+ EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc);
+-
+ /**
+ * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
+ * atomic update is valid
+@@ -5334,6 +5607,7 @@ EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);
+
+ /**
+ * drm_atomic_get_mst_topology_state: get MST topology state
++ *
+ * @state: global atomic state
+ * @mgr: MST topology manager, also the private object in this case
+ *
+@@ -5352,31 +5626,6 @@ struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_a
+ }
+ EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
+
+-/**
+- * drm_atomic_get_new_mst_topology_state: get new MST topology state in atomic state, if any
+- * @state: global atomic state
+- * @mgr: MST topology manager, also the private object in this case
+- *
+- * This function wraps drm_atomic_get_priv_obj_state() passing in the MST atomic
+- * state vtable so that the private object state returned is that of a MST
+- * topology object.
+- *
+- * Returns:
+- *
+- * The MST topology state, or NULL if there's no topology state for this MST mgr
+- * in the global atomic state
+- */
+-struct drm_dp_mst_topology_state *
+-drm_atomic_get_new_mst_topology_state(struct drm_atomic_state *state,
+- struct drm_dp_mst_topology_mgr *mgr)
+-{
+- struct drm_private_state *priv_state =
+- drm_atomic_get_new_private_obj_state(state, &mgr->base);
+-
+- return priv_state ? to_dp_mst_topology_state(priv_state) : NULL;
+-}
+-EXPORT_SYMBOL(drm_atomic_get_new_mst_topology_state);
+-
+ /**
+ * drm_dp_mst_topology_mgr_init - initialise a topology manager
+ * @mgr: manager struct to initialise
+@@ -5384,6 +5633,8 @@ EXPORT_SYMBOL(drm_atomic_get_new_mst_topology_state);
+ * @aux: DP helper aux channel to talk to this device
+ * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
+ * @max_payloads: maximum number of payloads this GPU can source
++ * @max_lane_count: maximum number of lanes this GPU supports
++ * @max_link_rate: maximum link rate per lane this GPU supports in kHz
+ * @conn_base_id: the connector object ID the MST device is connected to.
+ *
+ * Return 0 for success, or negative error code on failure
+@@ -5391,12 +5642,14 @@ EXPORT_SYMBOL(drm_atomic_get_new_mst_topology_state);
+ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_device *dev, struct drm_dp_aux *aux,
+ int max_dpcd_transaction_bytes, int max_payloads,
++ int max_lane_count, int max_link_rate,
+ int conn_base_id)
+ {
+ struct drm_dp_mst_topology_state *mst_state;
+
+ mutex_init(&mgr->lock);
+ mutex_init(&mgr->qlock);
++ mutex_init(&mgr->payload_lock);
+ mutex_init(&mgr->delayed_destroy_lock);
+ mutex_init(&mgr->up_req_lock);
+ mutex_init(&mgr->probe_lock);
+@@ -5426,7 +5679,19 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
+ mgr->aux = aux;
+ mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
+ mgr->max_payloads = max_payloads;
++ mgr->max_lane_count = max_lane_count;
++ mgr->max_link_rate = max_link_rate;
+ mgr->conn_base_id = conn_base_id;
++ if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 ||
++ max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8)
++ return -EINVAL;
++ mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
++ if (!mgr->payloads)
++ return -ENOMEM;
++ mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
++ if (!mgr->proposed_vcpis)
++ return -ENOMEM;
++ set_bit(0, &mgr->payload_mask);
+
+ mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);
+ if (mst_state == NULL)
+@@ -5459,12 +5724,19 @@ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
+ destroy_workqueue(mgr->delayed_destroy_wq);
+ mgr->delayed_destroy_wq = NULL;
+ }
++ mutex_lock(&mgr->payload_lock);
++ kfree(mgr->payloads);
++ mgr->payloads = NULL;
++ kfree(mgr->proposed_vcpis);
++ mgr->proposed_vcpis = NULL;
++ mutex_unlock(&mgr->payload_lock);
+ mgr->dev = NULL;
+ mgr->aux = NULL;
+ drm_atomic_private_obj_fini(&mgr->base);
+ mgr->funcs = NULL;
+
+ mutex_destroy(&mgr->delayed_destroy_lock);
++ mutex_destroy(&mgr->payload_lock);
+ mutex_destroy(&mgr->qlock);
+ mutex_destroy(&mgr->lock);
+ mutex_destroy(&mgr->up_req_lock);
+diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
+index 03604a37931c..e01a40f35284 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
++++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
+@@ -52,7 +52,6 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
+ struct drm_atomic_state *state = crtc_state->uapi.state;
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
+ struct intel_dp *intel_dp = &intel_mst->primary->dp;
+- struct drm_dp_mst_topology_state *mst_state;
+ struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+@@ -60,28 +59,22 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
+ &crtc_state->hw.adjusted_mode;
+ int bpp, slots = -EINVAL;
+
+- mst_state = drm_atomic_get_mst_topology_state(state, &intel_dp->mst_mgr);
+- if (IS_ERR(mst_state))
+- return PTR_ERR(mst_state);
+-
+ crtc_state->lane_count = limits->max_lane_count;
+ crtc_state->port_clock = limits->max_rate;
+
+- // TODO: Handle pbn_div changes by adding a new MST helper
+- if (!mst_state->pbn_div) {
+- mst_state->pbn_div = drm_dp_get_vc_payload_bw(&intel_dp->mst_mgr,
+- limits->max_rate,
+- limits->max_lane_count);
+- }
+-
+ for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
++
+ crtc_state->pipe_bpp = bpp;
+
+ crtc_state->pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock,
+ crtc_state->pipe_bpp,
+ false);
+ slots = drm_dp_atomic_find_time_slots(state, &intel_dp->mst_mgr,
+- connector->port, crtc_state->pbn);
++ connector->port,
++ crtc_state->pbn,
++ drm_dp_get_vc_payload_bw(&intel_dp->mst_mgr,
++ crtc_state->port_clock,
++ crtc_state->lane_count));
+ if (slots == -EDEADLK)
+ return slots;
+ if (slots >= 0)
+@@ -364,17 +357,21 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state,
+ struct intel_dp *intel_dp = &dig_port->dp;
+ struct intel_connector *connector =
+ to_intel_connector(old_conn_state->connector);
+- struct drm_dp_mst_topology_state *mst_state =
+- drm_atomic_get_mst_topology_state(&state->base, &intel_dp->mst_mgr);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
++ int start_slot = intel_dp_is_uhbr(old_crtc_state) ? 0 : 1;
++ int ret;
+
+ drm_dbg_kms(&i915->drm, "active links %d\n",
+ intel_dp->active_mst_links);
+
+ intel_hdcp_disable(intel_mst->connector);
+
+- drm_dp_remove_payload(&intel_dp->mst_mgr, mst_state,
+- drm_atomic_get_mst_payload_state(mst_state, connector->port));
++ drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, connector->port);
++
++ ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr, start_slot);
++ if (ret) {
++ drm_dbg_kms(&i915->drm, "failed to update payload %d\n", ret);
++ }
+
+ intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
+ }
+@@ -402,6 +399,8 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
+
+ intel_disable_transcoder(old_crtc_state);
+
++ drm_dp_update_payload_part2(&intel_dp->mst_mgr);
++
+ clear_act_sent(encoder, old_crtc_state);
+
+ intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder),
+@@ -409,6 +408,8 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
+
+ wait_for_act_sent(encoder, old_crtc_state);
+
++ drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, connector->port);
++
+ intel_ddi_disable_transcoder_func(old_crtc_state);
+
+ if (DISPLAY_VER(dev_priv) >= 9)
+@@ -475,8 +476,7 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
+- struct drm_dp_mst_topology_state *mst_state =
+- drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr);
++ int start_slot = intel_dp_is_uhbr(pipe_config) ? 0 : 1;
+ int ret;
+ bool first_mst_stream;
+
+@@ -502,13 +502,16 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
+ dig_port->base.pre_enable(state, &dig_port->base,
+ pipe_config, NULL);
+
++ ret = drm_dp_mst_allocate_vcpi(&intel_dp->mst_mgr,
++ connector->port,
++ pipe_config->pbn,
++ pipe_config->dp_m_n.tu);
++ if (!ret)
++ drm_err(&dev_priv->drm, "failed to allocate vcpi\n");
++
+ intel_dp->active_mst_links++;
+
+- ret = drm_dp_add_payload_part1(&intel_dp->mst_mgr, mst_state,
+- drm_atomic_get_mst_payload_state(mst_state, connector->port));
+- if (ret < 0)
+- drm_err(&dev_priv->drm, "Failed to create MST payload for %s: %d\n",
+- connector->base.name, ret);
++ ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr, start_slot);
+
+ /*
+ * Before Gen 12 this is not done as part of
+@@ -531,10 +534,7 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
+ struct intel_digital_port *dig_port = intel_mst->primary;
+ struct intel_dp *intel_dp = &dig_port->dp;
+- struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+- struct drm_dp_mst_topology_state *mst_state =
+- drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr);
+ enum transcoder trans = pipe_config->cpu_transcoder;
+
+ drm_WARN_ON(&dev_priv->drm, pipe_config->has_pch_encoder);
+@@ -562,8 +562,7 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
+
+ wait_for_act_sent(encoder, pipe_config);
+
+- drm_dp_add_payload_part2(&intel_dp->mst_mgr, &state->base,
+- drm_atomic_get_mst_payload_state(mst_state, connector->port));
++ drm_dp_update_payload_part2(&intel_dp->mst_mgr);
+
+ if (DISPLAY_VER(dev_priv) >= 14 && pipe_config->fec_enable)
+ intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(trans), 0,
+@@ -950,6 +949,8 @@ intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id)
+ struct intel_dp *intel_dp = &dig_port->dp;
+ enum port port = dig_port->base.port;
+ int ret;
++ int max_source_rate =
++ intel_dp->source_rates[intel_dp->num_source_rates - 1];
+
+ if (!HAS_DP_MST(i915) || intel_dp_is_edp(intel_dp))
+ return 0;
+@@ -965,7 +966,10 @@ intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id)
+ /* create encoders */
+ intel_dp_create_fake_mst_encoders(dig_port);
+ ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, &i915->drm,
+- &intel_dp->aux, 16, 3, conn_base_id);
++ &intel_dp->aux, 16, 3,
++ dig_port->max_lanes,
++ max_source_rate,
++ conn_base_id);
+ if (ret) {
+ intel_dp->mst_mgr.cbs = NULL;
+ return ret;
+diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
+index 6406fd487ee5..987e02eea66a 100644
+--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
++++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
+@@ -31,30 +31,8 @@
+
+ static int intel_conn_to_vcpi(struct intel_connector *connector)
+ {
+- struct drm_dp_mst_topology_mgr *mgr;
+- struct drm_dp_mst_atomic_payload *payload;
+- struct drm_dp_mst_topology_state *mst_state;
+- int vcpi = 0;
+-
+ /* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. */
+- if (!connector->port)
+- return 0;
+- mgr = connector->port->mgr;
+-
+- drm_modeset_lock(&mgr->base.lock, NULL);
+- mst_state = to_drm_dp_mst_topology_state(mgr->base.state);
+- payload = drm_atomic_get_mst_payload_state(mst_state, connector->port);
+- if (drm_WARN_ON(mgr->dev, !payload))
+- goto out;
+-
+- vcpi = payload->vcpi;
+- if (drm_WARN_ON(mgr->dev, vcpi < 0)) {
+- vcpi = 0;
+- goto out;
+- }
+-out:
+- drm_modeset_unlock(&mgr->base.lock);
+- return vcpi;
++ return connector->port ? connector->port->vcpi.vcpi : 0;
+ }
+
+ /*
+diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
+index 33c97d510999..8400a5d8ea6e 100644
+--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
++++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
+@@ -932,7 +932,6 @@ struct nv50_msto {
+ struct nv50_head *head;
+ struct nv50_mstc *mstc;
+ bool disabled;
+- bool enabled;
+ };
+
+ struct nouveau_encoder *nv50_real_outp(struct drm_encoder *encoder)
+@@ -949,36 +948,58 @@ struct nouveau_encoder *nv50_real_outp(struct drm_encoder *encoder)
+ }
+
+ static void
+-nv50_msto_cleanup(struct drm_atomic_state *state,
+- struct drm_dp_mst_topology_state *mst_state,
+- struct drm_dp_mst_topology_mgr *mgr,
+- struct nv50_msto *msto)
++nv50_msto_cleanup(struct nv50_msto *msto)
+ {
+ struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
+- struct drm_dp_mst_atomic_payload *payload =
+- drm_atomic_get_mst_payload_state(mst_state, msto->mstc->port);
++
++ struct nv50_mstc *mstc = msto->mstc;
++ struct nv50_mstm *mstm = mstc->mstm;
++
++ if (!msto->disabled)
++ return;
+
+ NV_ATOMIC(drm, "%s: msto cleanup\n", msto->encoder.name);
+
+- if (msto->disabled) {
+- msto->mstc = NULL;
+- msto->disabled = false;
+- } else if (msto->enabled) {
+- drm_dp_add_payload_part2(mgr, state, payload);
+- msto->enabled = false;
++ drm_dp_mst_deallocate_vcpi(&mstm->mgr, mstc->port);
++
++ msto->mstc = NULL;
++ msto->disabled = false;
++}
++
++static struct drm_dp_payload *
++nv50_msto_payload(struct nv50_msto *msto)
++{
++ struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
++ struct nv50_mstc *mstc = msto->mstc;
++ struct nv50_mstm *mstm = mstc->mstm;
++ int vcpi = mstc->port->vcpi.vcpi, i;
++
++ WARN_ON(!mutex_is_locked(&mstm->mgr.payload_lock));
++
++ NV_ATOMIC(drm, "%s: vcpi %d\n", msto->encoder.name, vcpi);
++ for (i = 0; i < mstm->mgr.max_payloads; i++) {
++ struct drm_dp_payload *payload = &mstm->mgr.payloads[i];
++ NV_ATOMIC(drm, "%s: %d: vcpi %d start 0x%02x slots 0x%02x\n",
++ mstm->outp->base.base.name, i, payload->vcpi,
++ payload->start_slot, payload->num_slots);
++ }
++
++ for (i = 0; i < mstm->mgr.max_payloads; i++) {
++ struct drm_dp_payload *payload = &mstm->mgr.payloads[i];
++ if (payload->vcpi == vcpi)
++ return payload;
+ }
++
++ return NULL;
+ }
+
+ static void
+-nv50_msto_prepare(struct drm_atomic_state *state,
+- struct drm_dp_mst_topology_state *mst_state,
+- struct drm_dp_mst_topology_mgr *mgr,
+- struct nv50_msto *msto)
++nv50_msto_prepare(struct nv50_msto *msto)
+ {
+ struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
+ struct nv50_mstc *mstc = msto->mstc;
+ struct nv50_mstm *mstm = mstc->mstm;
+- struct drm_dp_mst_atomic_payload *payload;
++ struct drm_dp_payload *payload = NULL;
+ struct {
+ struct nv50_disp_mthd_v1 base;
+ struct nv50_disp_sor_dp_mst_vcpi_v0 vcpi;
+@@ -990,27 +1011,28 @@ nv50_msto_prepare(struct drm_atomic_state *state,
+ (0x0100 << msto->head->base.index),
+ };
+
++ mutex_lock(&mstm->mgr.payload_lock);
++
+ NV_ATOMIC(drm, "%s: msto prepare\n", msto->encoder.name);
+
+- payload = drm_atomic_get_mst_payload_state(mst_state, mstc->port);
++ if (mstc->port->vcpi.vcpi > 0)
++ payload = nv50_msto_payload(msto);
+
+- // TODO: Figure out if we want to do a better job of handling VCPI allocation failures here?
+- if (msto->disabled) {
+- drm_dp_remove_payload(mgr, mst_state, payload);
+- } else {
+- if (msto->enabled)
+- drm_dp_add_payload_part1(mgr, mst_state, payload);
++ if (payload) {
++ NV_ATOMIC(drm, "%s: %s: %02x %02x %04x %04x\n",
++ msto->encoder.name, msto->head->base.base.name,
++ payload->start_slot, payload->num_slots,
++ mstc->port->vcpi.pbn, mstc->port->vcpi.aligned_pbn);
+
+- args.vcpi.start_slot = payload->vc_start_slot;
+- args.vcpi.num_slots = payload->time_slots;
++ args.vcpi.start_slot = payload->start_slot;
++ args.vcpi.num_slots = payload->num_slots;
+ args.vcpi.pbn = payload->pbn;
+- args.vcpi.aligned_pbn = payload->time_slots * mst_state->pbn_div;
+- }
++ } else {
++ NV_ATOMIC(drm, "%s: %s: %02x %02x %04x %04x\n",
++ msto->encoder.name, msto->head->base.base.name, 0, 0, 0, 0);
++ }
+
+- NV_ATOMIC(drm, "%s: %s: %02x %02x %04x %04x\n",
+- msto->encoder.name, msto->head->base.base.name,
+- args.vcpi.start_slot, args.vcpi.num_slots,
+- args.vcpi.pbn, args.vcpi.aligned_pbn);
++ mutex_unlock(&mstm->mgr.payload_lock);
+
+ nvif_mthd(&drm->display->disp.object, 0, &args, sizeof(args));
+ }
+@@ -1022,7 +1044,6 @@ nv50_msto_atomic_check(struct drm_encoder *encoder,
+ {
+ struct drm_atomic_state *state = crtc_state->state;
+ struct drm_connector *connector = conn_state->connector;
+- struct drm_dp_mst_topology_state *mst_state;
+ struct nv50_mstc *mstc = nv50_mstc(connector);
+ struct nv50_mstm *mstm = mstc->mstm;
+ struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
+@@ -1050,18 +1071,8 @@ nv50_msto_atomic_check(struct drm_encoder *encoder,
+ false);
+ }
+
+- mst_state = drm_atomic_get_mst_topology_state(state, &mstm->mgr);
+- if (IS_ERR(mst_state))
+- return PTR_ERR(mst_state);
+-
+- if (!mst_state->pbn_div) {
+- struct nouveau_encoder *outp = mstc->mstm->outp;
+-
+- mst_state->pbn_div = drm_dp_get_vc_payload_bw(&mstm->mgr,
+- outp->dp.link_bw, outp->dp.link_nr);
+- }
+-
+- slots = drm_dp_atomic_find_time_slots(state, &mstm->mgr, mstc->port, asyh->dp.pbn);
++ slots = drm_dp_atomic_find_time_slots(state, &mstm->mgr, mstc->port,
++ asyh->dp.pbn, 0);
+ if (slots < 0)
+ return slots;
+
+@@ -1093,6 +1104,7 @@ nv50_msto_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *st
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ u8 proto;
++ bool r;
+
+ drm_connector_list_iter_begin(encoder->dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+@@ -1107,6 +1119,10 @@ nv50_msto_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *st
+ if (WARN_ON(!mstc))
+ return;
+
++ r = drm_dp_mst_allocate_vcpi(&mstm->mgr, mstc->port, asyh->dp.pbn, asyh->dp.tu);
++ if (!r)
++ DRM_DEBUG_KMS("Failed to allocate VCPI\n");
++
+ if (!mstm->links++)
+ nv50_outp_acquire(mstm->outp, false /*XXX: MST audio.*/);
+
+@@ -1119,7 +1135,6 @@ nv50_msto_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *st
+ nv50_dp_bpc_to_depth(asyh->or.bpc));
+
+ msto->mstc = mstc;
+- msto->enabled = true;
+ mstm->modified = true;
+ }
+
+@@ -1130,6 +1145,8 @@ nv50_msto_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *s
+ struct nv50_mstc *mstc = msto->mstc;
+ struct nv50_mstm *mstm = mstc->mstm;
+
++ drm_dp_mst_reset_vcpi_slots(&mstm->mgr, mstc->port);
++
+ mstm->outp->update(mstm->outp, msto->head->base.index, NULL, 0, 0);
+ mstm->modified = true;
+ if (!--mstm->links)
+@@ -1349,9 +1366,7 @@ nv50_mstc_new(struct nv50_mstm *mstm, struct drm_dp_mst_port *port,
+ }
+
+ static void
+-nv50_mstm_cleanup(struct drm_atomic_state *state,
+- struct drm_dp_mst_topology_state *mst_state,
+- struct nv50_mstm *mstm)
++nv50_mstm_cleanup(struct nv50_mstm *mstm)
+ {
+ struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev);
+ struct drm_encoder *encoder;
+@@ -1359,12 +1374,14 @@ nv50_mstm_cleanup(struct drm_atomic_state *state,
+ NV_ATOMIC(drm, "%s: mstm cleanup\n", mstm->outp->base.base.name);
+ drm_dp_check_act_status(&mstm->mgr);
+
++ drm_dp_update_payload_part2(&mstm->mgr);
++
+ drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
+ if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
+ struct nv50_msto *msto = nv50_msto(encoder);
+ struct nv50_mstc *mstc = msto->mstc;
+ if (mstc && mstc->mstm == mstm)
+- nv50_msto_cleanup(state, mst_state, &mstm->mgr, msto);
++ nv50_msto_cleanup(msto);
+ }
+ }
+
+@@ -1372,34 +1389,20 @@ nv50_mstm_cleanup(struct drm_atomic_state *state,
+ }
+
+ static void
+-nv50_mstm_prepare(struct drm_atomic_state *state,
+- struct drm_dp_mst_topology_state *mst_state,
+- struct nv50_mstm *mstm)
++nv50_mstm_prepare(struct nv50_mstm *mstm)
+ {
+ struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev);
+ struct drm_encoder *encoder;
+
+ NV_ATOMIC(drm, "%s: mstm prepare\n", mstm->outp->base.base.name);
++ drm_dp_update_payload_part1(&mstm->mgr, 1);
+
+- /* Disable payloads first */
+- drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
+- if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
+- struct nv50_msto *msto = nv50_msto(encoder);
+- struct nv50_mstc *mstc = msto->mstc;
+- if (mstc && mstc->mstm == mstm && msto->disabled)
+- nv50_msto_prepare(state, mst_state, &mstm->mgr, msto);
+- }
+- }
+-
+- /* Add payloads for new heads, while also updating the start slots of any unmodified (but
+- * active) heads that may have had their VC slots shifted left after the previous step
+- */
+ drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
+ if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
+ struct nv50_msto *msto = nv50_msto(encoder);
+ struct nv50_mstc *mstc = msto->mstc;
+- if (mstc && mstc->mstm == mstm && !msto->disabled)
+- nv50_msto_prepare(state, mst_state, &mstm->mgr, msto);
++ if (mstc && mstc->mstm == mstm)
++ nv50_msto_prepare(msto);
+ }
+ }
+
+@@ -1596,7 +1599,9 @@ nv50_mstm_new(struct nouveau_encoder *outp, struct drm_dp_aux *aux, int aux_max,
+ mstm->mgr.cbs = &nv50_mstm;
+
+ ret = drm_dp_mst_topology_mgr_init(&mstm->mgr, dev, aux, aux_max,
+- max_payloads, conn_base_id);
++ max_payloads, outp->dcb->dpconf.link_nr,
++ drm_dp_bw_code_to_link_rate(outp->dcb->dpconf.link_bw),
++ conn_base_id);
+ if (ret)
+ return ret;
+
+@@ -2048,20 +2053,20 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
+ static void
+ nv50_disp_atomic_commit_core(struct drm_atomic_state *state, u32 *interlock)
+ {
+- struct drm_dp_mst_topology_mgr *mgr;
+- struct drm_dp_mst_topology_state *mst_state;
+ struct nouveau_drm *drm = nouveau_drm(state->dev);
+ struct nv50_disp *disp = nv50_disp(drm->dev);
+ struct nv50_core *core = disp->core;
+ struct nv50_mstm *mstm;
+- int i;
++ struct drm_encoder *encoder;
+
+ NV_ATOMIC(drm, "commit core %08x\n", interlock[NV50_DISP_INTERLOCK_BASE]);
+
+- for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
+- mstm = nv50_mstm(mgr);
+- if (mstm->modified)
+- nv50_mstm_prepare(state, mst_state, mstm);
++ drm_for_each_encoder(encoder, drm->dev) {
++ if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
++ mstm = nouveau_encoder(encoder)->dp.mstm;
++ if (mstm && mstm->modified)
++ nv50_mstm_prepare(mstm);
++ }
+ }
+
+ core->func->ntfy_init(disp->sync, NV50_DISP_CORE_NTFY);
+@@ -2070,10 +2075,12 @@ nv50_disp_atomic_commit_core(struct drm_atomic_state *state, u32 *interlock)
+ disp->core->chan.base.device))
+ NV_ERROR(drm, "core notifier timeout\n");
+
+- for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
+- mstm = nv50_mstm(mgr);
+- if (mstm->modified)
+- nv50_mstm_cleanup(state, mst_state, mstm);
++ drm_for_each_encoder(encoder, drm->dev) {
++ if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
++ mstm = nouveau_encoder(encoder)->dp.mstm;
++ if (mstm && mstm->modified)
++ nv50_mstm_cleanup(mstm);
++ }
+ }
+ }
+
+diff --git a/include/drm/display/drm_dp_mst_helper.h b/include/drm/display/drm_dp_mst_helper.h
+index 41fd8352ab65..1d2f77835de5 100644
+--- a/include/drm/display/drm_dp_mst_helper.h
++++ b/include/drm/display/drm_dp_mst_helper.h
+@@ -48,6 +48,20 @@ struct drm_dp_mst_topology_ref_history {
+
+ struct drm_dp_mst_branch;
+
++/**
++ * struct drm_dp_vcpi - Virtual Channel Payload Identifier
++ * @vcpi: Virtual channel ID.
++ * @pbn: Payload Bandwidth Number for this channel
++ * @aligned_pbn: PBN aligned with slot size
++ * @num_slots: number of slots for this PBN
++ */
++struct drm_dp_vcpi {
++ int vcpi;
++ int pbn;
++ int aligned_pbn;
++ int num_slots;
++};
++
+ /**
+ * struct drm_dp_mst_port - MST port
+ * @port_num: port number
+@@ -131,6 +145,7 @@ struct drm_dp_mst_port {
+ struct drm_dp_aux *passthrough_aux;
+ struct drm_dp_mst_branch *parent;
+
++ struct drm_dp_vcpi vcpi;
+ struct drm_connector *connector;
+ struct drm_dp_mst_topology_mgr *mgr;
+
+@@ -515,6 +530,20 @@ struct drm_dp_mst_topology_cbs {
+ void (*poll_hpd_irq)(struct drm_dp_mst_topology_mgr *mgr);
+ };
+
++#define DP_MAX_PAYLOAD (sizeof(unsigned long) * 8)
++
++#define DP_PAYLOAD_LOCAL 1
++#define DP_PAYLOAD_REMOTE 2
++#define DP_PAYLOAD_DELETE_LOCAL 3
++
++struct drm_dp_payload {
++ int payload_state;
++ int start_slot;
++ int num_slots;
++ int vcpi;
++ int pbn;
++};
++
+ #define to_dp_mst_topology_state(x) container_of(x, struct drm_dp_mst_topology_state, base)
+
+ /**
+@@ -527,34 +556,6 @@ struct drm_dp_mst_atomic_payload {
+ /** @port: The MST port assigned to this payload */
+ struct drm_dp_mst_port *port;
+
+- /**
+- * @vc_start_slot: The time slot that this payload starts on. Because payload start slots
+- * can't be determined ahead of time, the contents of this value are UNDEFINED at atomic
+- * check time. This shouldn't usually matter, as the start slot should never be relevant for
+- * atomic state computations.
+- *
+- * Since this value is determined at commit time instead of check time, this value is
+- * protected by the MST helpers ensuring that async commits operating on the given topology
+- * never run in parallel. In the event that a driver does need to read this value (e.g. to
+- * inform hardware of the starting timeslot for a payload), the driver may either:
+- *
+- * * Read this field during the atomic commit after
+- * drm_dp_mst_atomic_wait_for_dependencies() has been called, which will ensure the
+- * previous MST states payload start slots have been copied over to the new state. Note
+- * that a new start slot won't be assigned/removed from this payload until
+- * drm_dp_add_payload_part1()/drm_dp_remove_payload() have been called.
+- * * Acquire the MST modesetting lock, and then wait for any pending MST-related commits to
+- * get committed to hardware by calling drm_crtc_commit_wait() on each of the
+- * &drm_crtc_commit structs in &drm_dp_mst_topology_state.commit_deps.
+- *
+- * If neither of the two above solutions suffice (e.g. the driver needs to read the start
+- * slot in the middle of an atomic commit without waiting for some reason), then drivers
+- * should cache this value themselves after changing payloads.
+- */
+- s8 vc_start_slot;
+-
+- /** @vcpi: The Virtual Channel Payload Identifier */
+- u8 vcpi;
+ /**
+ * @time_slots:
+ * The number of timeslots allocated to this payload from the source DP Tx to
+@@ -582,6 +583,8 @@ struct drm_dp_mst_topology_state {
+ /** @base: Base private state for atomic */
+ struct drm_private_state base;
+
++ /** @payloads: The list of payloads being created/destroyed in this state */
++ struct list_head payloads;
+ /** @mgr: The topology manager */
+ struct drm_dp_mst_topology_mgr *mgr;
+
+@@ -598,21 +601,10 @@ struct drm_dp_mst_topology_state {
+ /** @num_commit_deps: The number of CRTC commits in @commit_deps */
+ size_t num_commit_deps;
+
+- /** @payload_mask: A bitmask of allocated VCPIs, used for VCPI assignments */
+- u32 payload_mask;
+- /** @payloads: The list of payloads being created/destroyed in this state */
+- struct list_head payloads;
+-
+ /** @total_avail_slots: The total number of slots this topology can handle (63 or 64) */
+ u8 total_avail_slots;
+ /** @start_slot: The first usable time slot in this topology (1 or 0) */
+ u8 start_slot;
+-
+- /**
+- * @pbn_div: The current PBN divisor for this topology. The driver is expected to fill this
+- * out itself.
+- */
+- int pbn_div;
+ };
+
+ #define to_dp_mst_topology_mgr(x) container_of(x, struct drm_dp_mst_topology_mgr, base)
+@@ -652,6 +644,14 @@ struct drm_dp_mst_topology_mgr {
+ * @max_payloads: maximum number of payloads the GPU can generate.
+ */
+ int max_payloads;
++ /**
++ * @max_lane_count: maximum number of lanes the GPU can drive.
++ */
++ int max_lane_count;
++ /**
++ * @max_link_rate: maximum link rate per lane GPU can output, in kHz.
++ */
++ int max_link_rate;
+ /**
+ * @conn_base_id: DRM connector ID this mgr is connected to. Only used
+ * to build the MST connector path value.
+@@ -694,20 +694,6 @@ struct drm_dp_mst_topology_mgr {
+ */
+ bool payload_id_table_cleared : 1;
+
+- /**
+- * @payload_count: The number of currently active payloads in hardware. This value is only
+- * intended to be used internally by MST helpers for payload tracking, and is only safe to
+- * read/write from the atomic commit (not check) context.
+- */
+- u8 payload_count;
+-
+- /**
+- * @next_start_slot: The starting timeslot to use for new VC payloads. This value is used
+- * internally by MST helpers for payload tracking, and is only safe to read/write from the
+- * atomic commit (not check) context.
+- */
+- u8 next_start_slot;
+-
+ /**
+ * @mst_primary: Pointer to the primary/first branch device.
+ */
+@@ -721,6 +707,10 @@ struct drm_dp_mst_topology_mgr {
+ * @sink_count: Sink count from DEVICE_SERVICE_IRQ_VECTOR_ESI0.
+ */
+ u8 sink_count;
++ /**
++ * @pbn_div: PBN to slots divisor.
++ */
++ int pbn_div;
+
+ /**
+ * @funcs: Atomic helper callbacks
+@@ -737,6 +727,32 @@ struct drm_dp_mst_topology_mgr {
+ */
+ struct list_head tx_msg_downq;
+
++ /**
++ * @payload_lock: Protect payload information.
++ */
++ struct mutex payload_lock;
++ /**
++ * @proposed_vcpis: Array of pointers for the new VCPI allocation. The
++ * VCPI structure itself is &drm_dp_mst_port.vcpi, and the size of
++ * this array is determined by @max_payloads.
++ */
++ struct drm_dp_vcpi **proposed_vcpis;
++ /**
++ * @payloads: Array of payloads. The size of this array is determined
++ * by @max_payloads.
++ */
++ struct drm_dp_payload *payloads;
++ /**
++ * @payload_mask: Elements of @payloads actually in use. Since
++ * reallocation of active outputs isn't possible gaps can be created by
++ * disabling outputs out of order compared to how they've been enabled.
++ */
++ unsigned long payload_mask;
++ /**
++ * @vcpi_mask: Similar to @payload_mask, but for @proposed_vcpis.
++ */
++ unsigned long vcpi_mask;
++
+ /**
+ * @tx_waitq: Wait to queue stall for the tx worker.
+ */
+@@ -808,7 +824,9 @@ struct drm_dp_mst_topology_mgr {
+ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_device *dev, struct drm_dp_aux *aux,
+ int max_dpcd_transaction_bytes,
+- int max_payloads, int conn_base_id);
++ int max_payloads,
++ int max_lane_count, int max_link_rate,
++ int conn_base_id);
+
+ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr);
+
+@@ -831,17 +849,28 @@ int drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr,
+
+ int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc);
+
++bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
++ struct drm_dp_mst_port *port, int pbn, int slots);
++
++int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
++
++
++void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
++
+ void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_t link_encoding_cap);
+
+-int drm_dp_add_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
+- struct drm_dp_mst_topology_state *mst_state,
+- struct drm_dp_mst_atomic_payload *payload);
+-int drm_dp_add_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
+- struct drm_atomic_state *state,
+- struct drm_dp_mst_atomic_payload *payload);
+-void drm_dp_remove_payload(struct drm_dp_mst_topology_mgr *mgr,
+- struct drm_dp_mst_topology_state *mst_state,
+- struct drm_dp_mst_atomic_payload *payload);
++void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
++ struct drm_dp_mst_port *port);
++
++
++int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
++ int pbn);
++
++
++int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr, int start_slot);
++
++
++int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr);
+
+ int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr);
+
+@@ -863,22 +892,17 @@ int drm_dp_mst_connector_late_register(struct drm_connector *connector,
+ void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
+ struct drm_dp_mst_port *port);
+
+-struct drm_dp_mst_topology_state *
+-drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
+- struct drm_dp_mst_topology_mgr *mgr);
+-struct drm_dp_mst_topology_state *
+-drm_atomic_get_new_mst_topology_state(struct drm_atomic_state *state,
+- struct drm_dp_mst_topology_mgr *mgr);
+-struct drm_dp_mst_atomic_payload *
+-drm_atomic_get_mst_payload_state(struct drm_dp_mst_topology_state *state,
+- struct drm_dp_mst_port *port);
++struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
++ struct drm_dp_mst_topology_mgr *mgr);
+ int __must_check
+ drm_dp_atomic_find_time_slots(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_mgr *mgr,
+- struct drm_dp_mst_port *port, int pbn);
++ struct drm_dp_mst_port *port, int pbn,
++ int pbn_div);
+ int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
+ struct drm_dp_mst_port *port,
+- int pbn, bool enable);
++ int pbn, int pbn_div,
++ bool enable);
+ int __must_check
+ drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_mgr *mgr);
+@@ -902,12 +926,6 @@ void drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port);
+
+ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port);
+
+-static inline struct drm_dp_mst_topology_state *
+-to_drm_dp_mst_topology_state(struct drm_private_state *state)
+-{
+- return container_of(state, struct drm_dp_mst_topology_state, base);
+-}
+-
+ extern const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs;
+
+ /**
+--
+2.39.0
+
diff --git a/0003-btrfs-fix-invalid-leaf-access-due-to-inline-extent-d.patch b/0003-btrfs-fix-invalid-leaf-access-due-to-inline-extent-d.patch
new file mode 100644
index 0000000..7d3468b
--- /dev/null
+++ b/0003-btrfs-fix-invalid-leaf-access-due-to-inline-extent-d.patch
@@ -0,0 +1,67 @@
+From 0a772f0e9788d760313382ec21b81dca83515966 Mon Sep 17 00:00:00 2001
+From: Filipe Manana <fdmanana@suse.com>
+Date: Thu, 12 Jan 2023 14:17:20 +0000
+Subject: [PATCH 3/5] btrfs: fix invalid leaf access due to inline extent
+ during lseek
+
+During lseek, for SEEK_DATA and SEEK_HOLE modes, we access the disk_bytenr
+of an extent without checking its type. However, inline extents have their
+data starting the offset of the disk_bytenr field, so accessing that field
+when we have an inline extent can result in either of the following:
+
+1) Interpret the inline extent's data as a disk_bytenr value;
+
+2) In case the inline data is less than 8 bytes, we access part of some
+ other item in the leaf, or unused space in the leaf;
+
+3) In case the inline data is less than 8 bytes and the extent item is
+ the first item in the leaf, we can access beyond the leaf's limit.
+
+So fix this by not accessing the disk_bytenr field if we have an inline
+extent.
+
+Fixes: b6e833567ea1 ("btrfs: make hole and data seeking a lot more efficient")
+Reported-by: Matthias Schoepfer <matthias.schoepfer@googlemail.com>
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=216908
+Link: https://lore.kernel.org/linux-btrfs/7f25442f-b121-2a3a-5a3d-22bcaae83cd4@leemhuis.info/
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Cherry-picked-for: https://bugs.archlinux.org/task/77041
+---
+ fs/btrfs/file.c | 13 ++++++++++---
+ 1 file changed, 10 insertions(+), 3 deletions(-)
+
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index 9bef8eaa074a..23056d9914d8 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -3838,6 +3838,7 @@ static loff_t find_desired_extent(struct btrfs_inode *inode, loff_t offset,
+ struct extent_buffer *leaf = path->nodes[0];
+ struct btrfs_file_extent_item *extent;
+ u64 extent_end;
++ u8 type;
+
+ if (path->slots[0] >= btrfs_header_nritems(leaf)) {
+ ret = btrfs_next_leaf(root, path);
+@@ -3892,10 +3893,16 @@ static loff_t find_desired_extent(struct btrfs_inode *inode, loff_t offset,
+
+ extent = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
++ type = btrfs_file_extent_type(leaf, extent);
+
+- if (btrfs_file_extent_disk_bytenr(leaf, extent) == 0 ||
+- btrfs_file_extent_type(leaf, extent) ==
+- BTRFS_FILE_EXTENT_PREALLOC) {
++ /*
++ * Can't access the extent's disk_bytenr field if this is an
++ * inline extent, since at that offset, it's where the extent
++ * data starts.
++ */
++ if (type == BTRFS_FILE_EXTENT_PREALLOC ||
++ (type == BTRFS_FILE_EXTENT_REG &&
++ btrfs_file_extent_disk_bytenr(leaf, extent) == 0)) {
+ /*
+ * Explicit hole or prealloc extent, search for delalloc.
+ * A prealloc extent is treated like a hole.
+--
+2.39.0
+
diff --git a/0003-drm-sched-add-DRM_SCHED_FENCE_DONT_PIPELINE-flag.patch b/0003-drm-sched-add-DRM_SCHED_FENCE_DONT_PIPELINE-flag.patch
deleted file mode 100644
index 04d18c0..0000000
--- a/0003-drm-sched-add-DRM_SCHED_FENCE_DONT_PIPELINE-flag.patch
+++ /dev/null
@@ -1,55 +0,0 @@
-From 95123f7acd2c486547a808631ea879eb5782738d Mon Sep 17 00:00:00 2001
-From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
-Date: Fri, 7 Oct 2022 09:51:13 +0200
-Subject: [PATCH 3/6] drm/sched: add DRM_SCHED_FENCE_DONT_PIPELINE flag
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-Setting this flag on a scheduler fence prevents pipelining of jobs
-depending on this fence. In other words we always insert a full CPU
-round trip before dependen jobs are pushed to the pipeline.
-
-Signed-off-by: Christian König <christian.koenig@amd.com>
----
- drivers/gpu/drm/scheduler/sched_entity.c | 3 ++-
- include/drm/gpu_scheduler.h | 9 +++++++++
- 2 files changed, 11 insertions(+), 1 deletion(-)
-
-diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
-index 7ef1a086a6fb..4b913dbb7d7b 100644
---- a/drivers/gpu/drm/scheduler/sched_entity.c
-+++ b/drivers/gpu/drm/scheduler/sched_entity.c
-@@ -389,7 +389,8 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
- }
-
- s_fence = to_drm_sched_fence(fence);
-- if (s_fence && s_fence->sched == sched) {
-+ if (s_fence && s_fence->sched == sched &&
-+ !test_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &fence->flags)) {
-
- /*
- * Fence is from the same scheduler, only need to wait for
-diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
-index addb135eeea6..289a33e80639 100644
---- a/include/drm/gpu_scheduler.h
-+++ b/include/drm/gpu_scheduler.h
-@@ -32,6 +32,15 @@
-
- #define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)
-
-+/**
-+ * DRM_SCHED_FENCE_DONT_PIPELINE - Prefent dependency pipelining
-+ *
-+ * Setting this flag on a scheduler fence prevents pipelining of jobs depending
-+ * on this fence. In other words we always insert a full CPU round trip before
-+ * dependen jobs are pushed to the hw queue.
-+ */
-+#define DRM_SCHED_FENCE_DONT_PIPELINE DMA_FENCE_FLAG_USER_BITS
-+
- struct drm_gem_object;
-
- struct drm_gpu_scheduler;
---
-2.38.1
-
diff --git a/0004-drm-amdgpu-use-DRM_SCHED_FENCE_DONT_PIPELINE-for-VM-.patch b/0004-drm-amdgpu-use-DRM_SCHED_FENCE_DONT_PIPELINE-for-VM-.patch
deleted file mode 100644
index e8eff5c..0000000
--- a/0004-drm-amdgpu-use-DRM_SCHED_FENCE_DONT_PIPELINE-for-VM-.patch
+++ /dev/null
@@ -1,41 +0,0 @@
-From 80d7a84de8dde6b960af432751bde998b70acc98 Mon Sep 17 00:00:00 2001
-From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
-Date: Fri, 7 Oct 2022 10:59:58 +0200
-Subject: [PATCH 4/6] drm/amdgpu: use DRM_SCHED_FENCE_DONT_PIPELINE for VM
- updates
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-Make sure that we always have a CPU round trip to let the submission
-code correctly decide if a TLB flush is necessary or not.
-
-Signed-off-by: Christian König <christian.koenig@amd.com>
----
- drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c | 9 ++++++++-
- 1 file changed, 8 insertions(+), 1 deletion(-)
-
-diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
-index 718db7d98e5a..25ad3c7fa24b 100644
---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
-+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
-@@ -115,8 +115,15 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
- amdgpu_bo_fence(p->vm->root.bo, f, true);
- }
-
-- if (fence && !p->immediate)
-+ if (fence && !p->immediate) {
-+ /*
-+ * Most hw generations now have a separate queue for page table
-+ * updates, but when the queue is shared with userspace we need
-+ * the extra CPU round trip to correctly flush the TLB.
-+ */
-+ set_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &f->flags);
- swap(*fence, f);
-+ }
- dma_fence_put(f);
- return 0;
-
---
-2.38.1
-
diff --git a/0004-wifi-mac80211-fix-initialization-of-rx-link-and-rx-l.patch b/0004-wifi-mac80211-fix-initialization-of-rx-link-and-rx-l.patch
new file mode 100644
index 0000000..0820c6c
--- /dev/null
+++ b/0004-wifi-mac80211-fix-initialization-of-rx-link-and-rx-l.patch
@@ -0,0 +1,459 @@
+From 93753514870b99ede0d3d94e176e3c35f55aab40 Mon Sep 17 00:00:00 2001
+From: Felix Fietkau <nbd@nbd.name>
+Date: Fri, 30 Dec 2022 21:07:47 +0100
+Subject: [PATCH 4/5] wifi: mac80211: fix initialization of rx->link and
+ rx->link_sta
+
+There are some codepaths that do not initialize rx->link_sta properly. This
+causes a crash in places which assume that rx->link_sta is valid if rx->sta
+is valid.
+One known instance is triggered by __ieee80211_rx_h_amsdu being called from
+fast-rx. It results in a crash like this one:
+
+ BUG: kernel NULL pointer dereference, address: 00000000000000a8
+ #PF: supervisor write access in kernel mode
+ #PF: error_code(0x0002) - not-present page PGD 0 P4D 0
+ Oops: 0002 [#1] PREEMPT SMP PTI
+ CPU: 1 PID: 506 Comm: mt76-usb-rx phy Tainted: G E 6.1.0-debian64x+1.7 #3
+ Hardware name: ZOTAC ZBOX-ID92/ZBOX-IQ01/ZBOX-ID92/ZBOX-IQ01, BIOS B220P007 05/21/2014
+ RIP: 0010:ieee80211_deliver_skb+0x62/0x1f0 [mac80211]
+ Code: 00 48 89 04 24 e8 9e a7 c3 df 89 c0 48 03 1c c5 a0 ea 39 a1 4c 01 6b 08 48 ff 03 48
+ 83 7d 28 00 74 11 48 8b 45 30 48 63 55 44 <48> 83 84 d0 a8 00 00 00 01 41 8b 86 c0
+ 11 00 00 8d 50 fd 83 fa 01
+ RSP: 0018:ffff999040803b10 EFLAGS: 00010286
+ RAX: 0000000000000000 RBX: ffffb9903f496480 RCX: 0000000000000000
+ RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000000
+ RBP: ffff999040803ce0 R08: 0000000000000000 R09: 0000000000000000
+ R10: 0000000000000000 R11: 0000000000000000 R12: ffff8d21828ac900
+ R13: 000000000000004a R14: ffff8d2198ed89c0 R15: ffff8d2198ed8000
+ FS: 0000000000000000(0000) GS:ffff8d24afe80000(0000) knlGS:0000000000000000
+ CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ CR2: 00000000000000a8 CR3: 0000000429810002 CR4: 00000000001706e0
+ Call Trace:
+ <TASK>
+ __ieee80211_rx_h_amsdu+0x1b5/0x240 [mac80211]
+ ? ieee80211_prepare_and_rx_handle+0xcdd/0x1320 [mac80211]
+ ? __local_bh_enable_ip+0x3b/0xa0
+ ieee80211_prepare_and_rx_handle+0xcdd/0x1320 [mac80211]
+ ? prepare_transfer+0x109/0x1a0 [xhci_hcd]
+ ieee80211_rx_list+0xa80/0xda0 [mac80211]
+ mt76_rx_complete+0x207/0x2e0 [mt76]
+ mt76_rx_poll_complete+0x357/0x5a0 [mt76]
+ mt76u_rx_worker+0x4f5/0x600 [mt76_usb]
+ ? mt76_get_min_avg_rssi+0x140/0x140 [mt76]
+ __mt76_worker_fn+0x50/0x80 [mt76]
+ kthread+0xed/0x120
+ ? kthread_complete_and_exit+0x20/0x20
+ ret_from_fork+0x22/0x30
+
+Since the initialization of rx->link and rx->link_sta is rather convoluted
+and duplicated in many places, clean it up by using a helper function to
+set it.
+
+Fixes: ccdde7c74ffd ("wifi: mac80211: properly implement MLO key handling")
+Fixes: b320d6c456ff ("wifi: mac80211: use correct rx link_sta instead of default")
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+Link: https://lore.kernel.org/r/20221230200747.19040-1-nbd@nbd.name
+[remove unnecessary rx->sta->sta.mlo check]
+Cc: stable@vger.kernel.org
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Cherry-picked-for: https://bugs.archlinux.org/task/76922
+---
+ net/mac80211/rx.c | 222 +++++++++++++++++++++-------------------------
+ 1 file changed, 99 insertions(+), 123 deletions(-)
+
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index f99416d2e144..3262ebb24092 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -4070,6 +4070,58 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
+ #undef CALL_RXH
+ }
+
++static bool
++ieee80211_rx_is_valid_sta_link_id(struct ieee80211_sta *sta, u8 link_id)
++{
++ if (!sta->mlo)
++ return false;
++
++ return !!(sta->valid_links & BIT(link_id));
++}
++
++static bool ieee80211_rx_data_set_link(struct ieee80211_rx_data *rx,
++ u8 link_id)
++{
++ rx->link_id = link_id;
++ rx->link = rcu_dereference(rx->sdata->link[link_id]);
++
++ if (!rx->sta)
++ return rx->link;
++
++ if (!ieee80211_rx_is_valid_sta_link_id(&rx->sta->sta, link_id))
++ return false;
++
++ rx->link_sta = rcu_dereference(rx->sta->link[link_id]);
++
++ return rx->link && rx->link_sta;
++}
++
++static bool ieee80211_rx_data_set_sta(struct ieee80211_rx_data *rx,
++ struct ieee80211_sta *pubsta,
++ int link_id)
++{
++ struct sta_info *sta;
++
++ sta = container_of(pubsta, struct sta_info, sta);
++
++ rx->link_id = link_id;
++ rx->sta = sta;
++
++ if (sta) {
++ rx->local = sta->sdata->local;
++ if (!rx->sdata)
++ rx->sdata = sta->sdata;
++ rx->link_sta = &sta->deflink;
++ }
++
++ if (link_id < 0)
++ rx->link = &rx->sdata->deflink;
++ else if (!ieee80211_rx_data_set_link(rx, link_id))
++ return false;
++
++ return true;
++}
++
+ /*
+ * This function makes calls into the RX path, therefore
+ * it has to be invoked under RCU read lock.
+@@ -4078,16 +4130,19 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
+ {
+ struct sk_buff_head frames;
+ struct ieee80211_rx_data rx = {
+- .sta = sta,
+- .sdata = sta->sdata,
+- .local = sta->local,
+ /* This is OK -- must be QoS data frame */
+ .security_idx = tid,
+ .seqno_idx = tid,
+- .link_id = -1,
+ };
+ struct tid_ampdu_rx *tid_agg_rx;
+- u8 link_id;
++ int link_id = -1;
++
++ /* FIXME: statistics won't be right with this */
++ if (sta->sta.valid_links)
++ link_id = ffs(sta->sta.valid_links) - 1;
++
++ if (!ieee80211_rx_data_set_sta(&rx, &sta->sta, link_id))
++ return;
+
+ tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
+ if (!tid_agg_rx)
+@@ -4107,10 +4162,6 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
+ };
+ drv_event_callback(rx.local, rx.sdata, &event);
+ }
+- /* FIXME: statistics won't be right with this */
+- link_id = sta->sta.valid_links ? ffs(sta->sta.valid_links) - 1 : 0;
+- rx.link = rcu_dereference(sta->sdata->link[link_id]);
+- rx.link_sta = rcu_dereference(sta->link[link_id]);
+
+ ieee80211_rx_handlers(&rx, &frames);
+ }
+@@ -4126,7 +4177,6 @@ void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid,
+ /* This is OK -- must be QoS data frame */
+ .security_idx = tid,
+ .seqno_idx = tid,
+- .link_id = -1,
+ };
+ int i, diff;
+
+@@ -4137,10 +4187,8 @@ void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid,
+
+ sta = container_of(pubsta, struct sta_info, sta);
+
+- rx.sta = sta;
+- rx.sdata = sta->sdata;
+- rx.link = &rx.sdata->deflink;
+- rx.local = sta->local;
++ if (!ieee80211_rx_data_set_sta(&rx, pubsta, -1))
++ return;
+
+ rcu_read_lock();
+ tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
+@@ -4527,15 +4575,6 @@ void ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata)
+ mutex_unlock(&local->sta_mtx);
+ }
+
+-static bool
+-ieee80211_rx_is_valid_sta_link_id(struct ieee80211_sta *sta, u8 link_id)
+-{
+- if (!sta->mlo)
+- return false;
+-
+- return !!(sta->valid_links & BIT(link_id));
+-}
+-
+ static void ieee80211_rx_8023(struct ieee80211_rx_data *rx,
+ struct ieee80211_fast_rx *fast_rx,
+ int orig_len)
+@@ -4646,7 +4685,6 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
+ struct sk_buff *skb = rx->skb;
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+- struct sta_info *sta = rx->sta;
+ int orig_len = skb->len;
+ int hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ int snap_offs = hdrlen;
+@@ -4658,7 +4696,6 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
+ u8 da[ETH_ALEN];
+ u8 sa[ETH_ALEN];
+ } addrs __aligned(2);
+- struct link_sta_info *link_sta;
+ struct ieee80211_sta_rx_stats *stats;
+
+ /* for parallel-rx, we need to have DUP_VALIDATED, otherwise we write
+@@ -4761,18 +4798,10 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
+ drop:
+ dev_kfree_skb(skb);
+
+- if (rx->link_id >= 0) {
+- link_sta = rcu_dereference(sta->link[rx->link_id]);
+- if (!link_sta)
+- return true;
+- } else {
+- link_sta = &sta->deflink;
+- }
+-
+ if (fast_rx->uses_rss)
+- stats = this_cpu_ptr(link_sta->pcpu_rx_stats);
++ stats = this_cpu_ptr(rx->link_sta->pcpu_rx_stats);
+ else
+- stats = &link_sta->rx_stats;
++ stats = &rx->link_sta->rx_stats;
+
+ stats->dropped++;
+ return true;
+@@ -4790,8 +4819,8 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
+ struct ieee80211_local *local = rx->local;
+ struct ieee80211_sub_if_data *sdata = rx->sdata;
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+- struct link_sta_info *link_sta = NULL;
+- struct ieee80211_link_data *link;
++ struct link_sta_info *link_sta = rx->link_sta;
++ struct ieee80211_link_data *link = rx->link;
+
+ rx->skb = skb;
+
+@@ -4813,35 +4842,6 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
+ if (!ieee80211_accept_frame(rx))
+ return false;
+
+- if (rx->link_id >= 0) {
+- link = rcu_dereference(rx->sdata->link[rx->link_id]);
+-
+- /* we might race link removal */
+- if (!link)
+- return true;
+- rx->link = link;
+-
+- if (rx->sta) {
+- rx->link_sta =
+- rcu_dereference(rx->sta->link[rx->link_id]);
+- if (!rx->link_sta)
+- return true;
+- }
+- } else {
+- if (rx->sta)
+- rx->link_sta = &rx->sta->deflink;
+-
+- rx->link = &sdata->deflink;
+- }
+-
+- if (unlikely(!is_multicast_ether_addr(hdr->addr1) &&
+- rx->link_id >= 0 && rx->sta && rx->sta->sta.mlo)) {
+- link_sta = rcu_dereference(rx->sta->link[rx->link_id]);
+-
+- if (WARN_ON_ONCE(!link_sta))
+- return true;
+- }
+-
+ if (!consume) {
+ struct skb_shared_hwtstamps *shwt;
+
+@@ -4861,7 +4861,7 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
+ shwt->hwtstamp = skb_hwtstamps(skb)->hwtstamp;
+ }
+
+- if (unlikely(link_sta)) {
++ if (unlikely(rx->sta && rx->sta->sta.mlo)) {
+ /* translate to MLD addresses */
+ if (ether_addr_equal(link->conf->addr, hdr->addr1))
+ ether_addr_copy(hdr->addr1, rx->sdata->vif.addr);
+@@ -4891,6 +4891,7 @@ static void __ieee80211_rx_handle_8023(struct ieee80211_hw *hw,
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+ struct ieee80211_fast_rx *fast_rx;
+ struct ieee80211_rx_data rx;
++ int link_id = -1;
+
+ memset(&rx, 0, sizeof(rx));
+ rx.skb = skb;
+@@ -4907,12 +4908,8 @@ static void __ieee80211_rx_handle_8023(struct ieee80211_hw *hw,
+ if (!pubsta)
+ goto drop;
+
+- rx.sta = container_of(pubsta, struct sta_info, sta);
+- rx.sdata = rx.sta->sdata;
+-
+- if (status->link_valid &&
+- !ieee80211_rx_is_valid_sta_link_id(pubsta, status->link_id))
+- goto drop;
++ if (status->link_valid)
++ link_id = status->link_id;
+
+ /*
+ * TODO: Should the frame be dropped if the right link_id is not
+@@ -4921,19 +4918,8 @@ static void __ieee80211_rx_handle_8023(struct ieee80211_hw *hw,
+ * link_id is used only for stats purpose and updating the stats on
+ * the deflink is fine?
+ */
+- if (status->link_valid)
+- rx.link_id = status->link_id;
+-
+- if (rx.link_id >= 0) {
+- struct ieee80211_link_data *link;
+-
+- link = rcu_dereference(rx.sdata->link[rx.link_id]);
+- if (!link)
+- goto drop;
+- rx.link = link;
+- } else {
+- rx.link = &rx.sdata->deflink;
+- }
++ if (!ieee80211_rx_data_set_sta(&rx, pubsta, link_id))
++ goto drop;
+
+ fast_rx = rcu_dereference(rx.sta->fast_rx);
+ if (!fast_rx)
+@@ -4951,6 +4937,8 @@ static bool ieee80211_rx_for_interface(struct ieee80211_rx_data *rx,
+ {
+ struct link_sta_info *link_sta;
+ struct ieee80211_hdr *hdr = (void *)skb->data;
++ struct sta_info *sta;
++ int link_id = -1;
+
+ /*
+ * Look up link station first, in case there's a
+@@ -4960,24 +4948,19 @@ static bool ieee80211_rx_for_interface(struct ieee80211_rx_data *rx,
+ */
+ link_sta = link_sta_info_get_bss(rx->sdata, hdr->addr2);
+ if (link_sta) {
+- rx->sta = link_sta->sta;
+- rx->link_id = link_sta->link_id;
++ sta = link_sta->sta;
++ link_id = link_sta->link_id;
+ } else {
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+
+- rx->sta = sta_info_get_bss(rx->sdata, hdr->addr2);
+- if (rx->sta) {
+- if (status->link_valid &&
+- !ieee80211_rx_is_valid_sta_link_id(&rx->sta->sta,
+- status->link_id))
+- return false;
+-
+- rx->link_id = status->link_valid ? status->link_id : -1;
+- } else {
+- rx->link_id = -1;
+- }
++ sta = sta_info_get_bss(rx->sdata, hdr->addr2);
++ if (status->link_valid)
++ link_id = status->link_id;
+ }
+
++ if (!ieee80211_rx_data_set_sta(rx, &sta->sta, link_id))
++ return false;
++
+ return ieee80211_prepare_and_rx_handle(rx, skb, consume);
+ }
+
+@@ -5036,19 +5019,15 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
+
+ if (ieee80211_is_data(fc)) {
+ struct sta_info *sta, *prev_sta;
+- u8 link_id = status->link_id;
++ int link_id = -1;
+
+- if (pubsta) {
+- rx.sta = container_of(pubsta, struct sta_info, sta);
+- rx.sdata = rx.sta->sdata;
++ if (status->link_valid)
++ link_id = status->link_id;
+
+- if (status->link_valid &&
+- !ieee80211_rx_is_valid_sta_link_id(pubsta, link_id))
++ if (pubsta) {
++ if (!ieee80211_rx_data_set_sta(&rx, pubsta, link_id))
+ goto out;
+
+- if (status->link_valid)
+- rx.link_id = status->link_id;
+-
+ /*
+ * In MLO connection, fetch the link_id using addr2
+ * when the driver does not pass link_id in status.
+@@ -5066,7 +5045,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
+ if (!link_sta)
+ goto out;
+
+- rx.link_id = link_sta->link_id;
++ ieee80211_rx_data_set_link(&rx, link_sta->link_id);
+ }
+
+ if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
+@@ -5082,30 +5061,27 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
+ continue;
+ }
+
+- if ((status->link_valid &&
+- !ieee80211_rx_is_valid_sta_link_id(&prev_sta->sta,
+- link_id)) ||
+- (!status->link_valid && prev_sta->sta.mlo))
++ rx.sdata = prev_sta->sdata;
++ if (!ieee80211_rx_data_set_sta(&rx, &prev_sta->sta,
++ link_id))
++ goto out;
++
++ if (!status->link_valid && prev_sta->sta.mlo)
+ continue;
+
+- rx.link_id = status->link_valid ? link_id : -1;
+- rx.sta = prev_sta;
+- rx.sdata = prev_sta->sdata;
+ ieee80211_prepare_and_rx_handle(&rx, skb, false);
+
+ prev_sta = sta;
+ }
+
+ if (prev_sta) {
+- if ((status->link_valid &&
+- !ieee80211_rx_is_valid_sta_link_id(&prev_sta->sta,
+- link_id)) ||
+- (!status->link_valid && prev_sta->sta.mlo))
++ rx.sdata = prev_sta->sdata;
++ if (!ieee80211_rx_data_set_sta(&rx, &prev_sta->sta,
++ link_id))
+ goto out;
+
+- rx.link_id = status->link_valid ? link_id : -1;
+- rx.sta = prev_sta;
+- rx.sdata = prev_sta->sdata;
++ if (!status->link_valid && prev_sta->sta.mlo)
++ goto out;
+
+ if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
+ return;
+--
+2.39.0
+
diff --git a/PKGBUILD b/PKGBUILD
index 00f606d..56e23cc 100644
--- a/PKGBUILD
+++ b/PKGBUILD
@@ -18,9 +18,9 @@ _custom=0
pkgbase=linux
_supver=6
-_majver=0
-_minver=19
-_gccpatchver='20221104'
+_majver=1
+_minver=7
+_gccpatchver='20230105'
_gccpatchker='5.17+'
if [ "$_minver" == "0" ]; then
pkgver=${_supver}.${_majver}
@@ -33,7 +33,7 @@ url='https://kernel.org'
arch=(x86_64)
license=(GPL2)
makedepends=(
- bc libelf pahole cpio perl tar xz
+ bc libelf pahole cpio perl tar xz gettext
xmlto python-sphinx python-sphinx_rtd_theme graphviz imagemagick texlive-latexextra
)
conflicts=('linux-libre')
@@ -43,8 +43,9 @@ source=(
https://www.kernel.org/pub/linux/kernel/v${_supver}.x/${_srcname}.tar.{xz,sign}
config # the main kernel config file
0001-ZEN-Add-sysctl-and-CONFIG-to-disallow-unprivileged-C.patch
- 0003-drm-sched-add-DRM_SCHED_FENCE_DONT_PIPELINE-flag.patch
- 0004-drm-amdgpu-use-DRM_SCHED_FENCE_DONT_PIPELINE-for-VM-.patch
+ 0002-Revert-drm-display-dp_mst-Move-all-payload-info-into.patch
+ 0003-btrfs-fix-invalid-leaf-access-due-to-inline-extent-d.patch
+ 0004-wifi-mac80211-fix-initialization-of-rx-link-and-rx-l.patch
kernel_compiler_patch-${_gccpatchver}.tar.gz::https://github.com/graysky2/kernel_compiler_patch/archive/${_gccpatchver}.tar.gz
ath9k-regdom-hack.patch
raid6-default-algo.patch
@@ -54,22 +55,24 @@ validpgpkeys=(
'647F28654894E3BD457199BE38DBBDC86092693E' # Greg Kroah-Hartman
)
# https://www.kernel.org/pub/linux/kernel/v5.x/sha256sums.asc
-sha256sums=('abe37eb0e2e331bdc7c4110115664e180e7d43b7336de6b4cd2bd1b123d30207'
+sha256sums=('4ab048bad2e7380d3b827f1fad5ad2d2fc4f2e80e1c604d85d1f8781debe600f'
'SKIP'
- 'e490737c0007da7bf9275e4d5f0162b64bb27d31169c9a24c2258c56c76fa43f'
- '2f4d03a8bb21357f88d694b62fc3299944fa1738652dfe888ac0320d5d21f351'
- '671c3852d1adf7095cf82fdecf197c65df4d3003c917b56cee2fc9845cd06883'
- '3d00e39c53c107c87925eaeade32fc7d78e916e588ab5d8e4dd84c33ae748a96'
- '3a8f397b89bad95c46f42c0f80ede7536a4a45a28621e00ed486918a55f905ed'
+ 'a77671535536c1b912f2824cf202e384470b983904fa3d1ab105cfd868872c3d'
+ '602a2fd8d11f86824889318ec33cdb20030cbd713d63514126e9c11dcb78ccf3'
+ '5a9dccbf0edcc0e35c0469de43c17cf890cbd433c6befce68ed6f79c09b244db'
+ 'd98df9708c63a756a2cf00d5bfd1a3644e20b9ddd4cca98fb28dec111a8746e4'
+ '79d1ff9bc74d8e6d45c6200f3d9859cb169b0738befab283f376e17d6ae72b44'
+ '802946f623c69ae1a636b63697c23ca48af31a099415ed837d2c1e168a272d23'
'e9e0d289170b7fb598b572d9c892ae8d1420952034aa415e8b3334f20a58edcc'
'6ab863c8cfe6e0dd53a9d6455872fd4391508a8b97ab92e3f13558a6617b12a6')
-b2sums=('a19a5db545cc8d4d220942b791ced3c2e6b9c8980d1f427de9b3b1804cca5468475d0222b2e905ef96baa229c7df227f149a1703a9c94fc0e0d7126744740e94'
+b2sums=('13c970a5780fd4ed97a0ff5d7c13e2f5cfebf608199ae94973f3a8a109bb961f1effb45bd61a687f5faaf784a84b4c88b5a4bb75b7a1a943f44cab9b6ad37ce1'
'SKIP'
- '02755ec326bde2703f0019d9fa214f6a7db52af43d47dde9ba87cd95bdc40fc4cc47f678a677640e4a472ec46790ba31ac36c37ded650740a1ef4f23b3f5e92b'
- '9d9dd78748f901b9be5876e1b34f4f13cae51384fbbfa653678924c6c8d90fdddf71b8c524aa2bc24548c6111adf160153753bd8a738866a2473459a338df6a0'
- 'd7b384645d39201c43871e3c6067012b8bb2b30bcce3217dbfdd6cc9437e1cf1ab7dbbbcd333cc5b9e04a986a6d1aefa9659cab73fdd308878621104285138b8'
- 'bc9d55712dbcb69a6220ffa98da46ac8ed78c2901e74aa65d4a5f35796a7cc17f2bd87d9d62924c18266f703bea67debf120d5ceff7a21963d1b35e022391252'
- '05bddc2b57189d7e302f32041079bcf60a06938e9afcfd02a0085a1430255286a5b663eed7cdb7f857717c65e9e27af4d15625b17e0a36d1b4ce1cbda6baee2b'
+ '04b86b565c344ded78538cd958465b3268f5f04ac8ca5f6b36b0ad54558102646a22526b5ec250e7a959fb698742b43a025df6c65cd2323f8514fe807fc6b20a'
+ '62b993e415186acc54f11c79f140f9f4df0c62658fa45e4895d131a3729810c181bfd7f13c6059ed882aa1d8ce79651acc30ab42077bae5497b4f0a1d05bf1aa'
+ '11d46f3b7fdef0e1bb19959aa6414e9b59365be0f974077d80d3ac8cedea25f445e5549d0eb463042bf42f355937ae18d0d1ab78d386d65645cc3a2b657893b6'
+ 'f751f7712a50fd7b22a9525921912904a5ec69facf29a967e987f591ec79e9e4d6ce23d3481940b20f1d5d9866c5d82c81655e3b51b9647efbe882a00a5c3172'
+ 'f5298854e65f9e1c4e872b38300f408a47b5d1f0dec655cc2d2815247d4f15656c835145d6d5eeec448382df8faf006e9ad59eabf60e30f5b8ee66b44f86e1c9'
+ 'd178dad69501967382d5c841f65e4f57651042bee8117041a9baa35ab3fa73af8174b8b999ae9e72ec381c52744ccaaabb77944d59f123c04b6ed5626432d843'
'b6ef77035611139fa9a6d5b8d30570e2781bb4da483bb569884b0bd0129b62e0b82a5a6776fefe43fee801c70d39de1ea4d4c177f7cedd5ac135e3c64f7b895a'
'e94aa35d92cec92f4b0d487e0569790f3b712b9eaa5107f14a4200578e398ca740bf369f30f070c8beb56a72d1a6d0fc06beb650d798a64f44abe5e3af327728')
@@ -91,8 +94,9 @@ prepare() {
# Hotfixes
echo "Applying hotfixes"
patch -p1 -i ../0001-ZEN-Add-sysctl-and-CONFIG-to-disallow-unprivileged-C.patch
- patch -p1 -i ../0003-drm-sched-add-DRM_SCHED_FENCE_DONT_PIPELINE-flag.patch
- patch -p1 -i ../0004-drm-amdgpu-use-DRM_SCHED_FENCE_DONT_PIPELINE-for-VM-.patch
+ patch -p1 -i ../0002-Revert-drm-display-dp_mst-Move-all-payload-info-into.patch
+ patch -p1 -i ../0003-btrfs-fix-invalid-leaf-access-due-to-inline-extent-d.patch
+ patch -p1 -i ../0004-wifi-mac80211-fix-initialization-of-rx-link-and-rx-l.patch
# graysky gcc patch
echo "Applying graysky gcc patch"
diff --git a/config b/config
index 19245ff..6a19ff2 100644
--- a/config
+++ b/config
@@ -1,15 +1,15 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/x86 6.0.6 Kernel Configuration
+# Linux/x86 6.1.7 Kernel Configuration
#
-CONFIG_CC_VERSION_TEXT="gcc (GCC) 12.2.0"
+CONFIG_CC_VERSION_TEXT="gcc (GCC) 12.2.1 20230111"
CONFIG_CC_IS_GCC=y
-CONFIG_GCC_VERSION=120200
+CONFIG_GCC_VERSION=120201
CONFIG_CLANG_VERSION=0
CONFIG_AS_IS_GNU=y
-CONFIG_AS_VERSION=23900
+CONFIG_AS_VERSION=24000
CONFIG_LD_IS_BFD=y
-CONFIG_LD_VERSION=23900
+CONFIG_LD_VERSION=24000
CONFIG_LLD_VERSION=0
CONFIG_CC_CAN_LINK=y
CONFIG_CC_CAN_LINK_STATIC=y
@@ -206,6 +206,7 @@ CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y
CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y
CONFIG_CC_HAS_INT128=y
CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5"
+CONFIG_GCC11_NO_ARRAY_BOUNDS=y
CONFIG_GCC12_NO_ARRAY_BOUNDS=y
CONFIG_CC_NO_ARRAY_BOUNDS=y
CONFIG_ARCH_SUPPORTS_INT128=y
@@ -215,7 +216,6 @@ CONFIG_CGROUPS=y
CONFIG_PAGE_COUNTER=y
# CONFIG_CGROUP_FAVOR_DYNMODS is not set
CONFIG_MEMCG=y
-CONFIG_MEMCG_SWAP=y
CONFIG_MEMCG_KMEM=y
CONFIG_BLK_CGROUP=y
CONFIG_CGROUP_WRITEBACK=y
@@ -380,6 +380,7 @@ CONFIG_XEN_SAVE_RESTORE=y
# CONFIG_XEN_DEBUG_FS is not set
CONFIG_XEN_PVH=y
CONFIG_XEN_DOM0=y
+CONFIG_XEN_PV_MSR_SAFE=y
CONFIG_KVM_GUEST=y
CONFIG_ARCH_CPUIDLE_HALTPOLL=y
CONFIG_PVH=y
@@ -702,6 +703,7 @@ CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
CONFIG_X86_INTEL_PSTATE=y
CONFIG_X86_PCC_CPUFREQ=m
CONFIG_X86_AMD_PSTATE=y
+CONFIG_X86_AMD_PSTATE_UT=m
CONFIG_X86_ACPI_CPUFREQ=m
CONFIG_X86_ACPI_CPUFREQ_CPB=y
CONFIG_X86_POWERNOW_K8=m
@@ -758,6 +760,8 @@ CONFIG_HAVE_KVM_IRQCHIP=y
CONFIG_HAVE_KVM_IRQFD=y
CONFIG_HAVE_KVM_IRQ_ROUTING=y
CONFIG_HAVE_KVM_DIRTY_RING=y
+CONFIG_HAVE_KVM_DIRTY_RING_TSO=y
+CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL=y
CONFIG_HAVE_KVM_EVENTFD=y
CONFIG_KVM_MMIO=y
CONFIG_KVM_ASYNC_PF=y
@@ -776,6 +780,7 @@ CONFIG_KVM_WERROR=y
CONFIG_KVM_INTEL=m
# CONFIG_X86_SGX_KVM is not set
CONFIG_KVM_AMD=m
+# CONFIG_KVM_AMD_SEV is not set
CONFIG_KVM_XEN=y
CONFIG_KVM_EXTERNAL_WRITE_TRACKING=y
CONFIG_AS_AVX512=y
@@ -824,6 +829,9 @@ CONFIG_ARCH_WANTS_NO_INSTR=y
CONFIG_HAVE_ASM_MODVERSIONS=y
CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
CONFIG_HAVE_RSEQ=y
+CONFIG_HAVE_RUST=y
+# CONFIG_RUST is not set
+# CONFIG_RUST_IS_AVAILABLE is not set
CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y
CONFIG_HAVE_HW_BREAKPOINT=y
CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y
@@ -855,6 +863,7 @@ CONFIG_STACKPROTECTOR_STRONG=y
CONFIG_ARCH_SUPPORTS_LTO_CLANG=y
CONFIG_ARCH_SUPPORTS_LTO_CLANG_THIN=y
CONFIG_LTO_NONE=y
+CONFIG_ARCH_SUPPORTS_CFI_CLANG=y
CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES=y
CONFIG_HAVE_CONTEXT_TRACKING_USER=y
CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK=y
@@ -917,6 +926,7 @@ CONFIG_ARCH_SUPPORTS_PAGE_TABLE_CHECK=y
CONFIG_ARCH_HAS_ELFCORE_COMPAT=y
CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH=y
CONFIG_DYNAMIC_SIGFRAME=y
+CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG=y
#
# GCOV-based kernel profiling
@@ -1082,6 +1092,7 @@ CONFIG_ZSMALLOC=y
#
# CONFIG_SLAB is not set
CONFIG_SLUB=y
+# CONFIG_SLOB is not set
CONFIG_SLAB_MERGE_DEFAULT=y
CONFIG_SLAB_FREELIST_RANDOM=y
CONFIG_SLAB_FREELIST_HARDENED=y
@@ -1111,6 +1122,7 @@ CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y
CONFIG_MEMORY_BALLOON=y
CONFIG_BALLOON_COMPACTION=y
CONFIG_COMPACTION=y
+CONFIG_COMPACT_UNEVICTABLE_DEFAULT=1
CONFIG_PAGE_REPORTING=y
CONFIG_MIGRATION=y
CONFIG_DEVICE_MIGRATION=y
@@ -1171,6 +1183,9 @@ CONFIG_HAVE_ARCH_USERFAULTFD_WP=y
CONFIG_HAVE_ARCH_USERFAULTFD_MINOR=y
CONFIG_PTE_MARKER=y
CONFIG_PTE_MARKER_UFFD_WP=y
+CONFIG_LRU_GEN=y
+CONFIG_LRU_GEN_ENABLED=y
+# CONFIG_LRU_GEN_STATS is not set
#
# Data Access Monitoring
@@ -1738,7 +1753,6 @@ CONFIG_NET_DSA_TAG_XRS700X=m
CONFIG_VLAN_8021Q=m
CONFIG_VLAN_8021Q_GVRP=y
CONFIG_VLAN_8021Q_MVRP=y
-# CONFIG_DECNET is not set
CONFIG_LLC=m
CONFIG_LLC2=m
CONFIG_ATALK=m
@@ -2678,6 +2692,7 @@ CONFIG_UACCE=m
CONFIG_PVPANIC=y
CONFIG_PVPANIC_MMIO=m
CONFIG_PVPANIC_PCI=m
+CONFIG_GP_PCI1XXXX=m
# end of Misc devices
#
@@ -2844,6 +2859,7 @@ CONFIG_SATA_PMP=y
CONFIG_SATA_AHCI=y
CONFIG_SATA_MOBILE_LPM_POLICY=3
CONFIG_SATA_AHCI_PLATFORM=m
+CONFIG_AHCI_DWC=m
CONFIG_SATA_INIC162X=m
CONFIG_SATA_ACARD_AHCI=m
CONFIG_SATA_SIL24=m
@@ -2919,7 +2935,6 @@ CONFIG_PATA_MPIIX=m
CONFIG_PATA_NS87410=m
CONFIG_PATA_OPTI=m
CONFIG_PATA_PCMCIA=m
-# CONFIG_PATA_PLATFORM is not set
CONFIG_PATA_RZ1000=m
#
@@ -3276,8 +3291,11 @@ CONFIG_ICE_HWTS=y
CONFIG_FM10K=m
CONFIG_IGC=m
CONFIG_NET_VENDOR_WANGXUN=y
+CONFIG_NGBE=m
CONFIG_TXGBE=m
CONFIG_JME=m
+CONFIG_NET_VENDOR_ADI=y
+CONFIG_ADIN1110=m
CONFIG_NET_VENDOR_LITEX=y
CONFIG_NET_VENDOR_MARVELL=y
CONFIG_MVMDIO=m
@@ -3308,6 +3326,7 @@ CONFIG_MLX5_TC_CT=y
CONFIG_MLX5_TC_SAMPLE=y
CONFIG_MLX5_CORE_EN_DCB=y
CONFIG_MLX5_CORE_IPOIB=y
+CONFIG_MLX5_EN_MACSEC=y
CONFIG_MLX5_EN_IPSEC=y
CONFIG_MLX5_EN_TLS=y
CONFIG_MLX5_SW_STEERING=y
@@ -3530,6 +3549,8 @@ CONFIG_DP83TD510_PHY=m
CONFIG_VITESSE_PHY=m
CONFIG_XILINX_GMII2RGMII=m
CONFIG_MICREL_KS8995MA=m
+CONFIG_PSE_CONTROLLER=y
+CONFIG_PSE_REGULATOR=m
CONFIG_CAN_DEV=m
CONFIG_CAN_VCAN=m
CONFIG_CAN_VXCAN=m
@@ -3623,6 +3644,7 @@ CONFIG_MDIO_THUNDER=m
#
CONFIG_PCS_XPCS=m
CONFIG_PCS_LYNX=m
+CONFIG_PCS_ALTERA_TSE=m
# end of PCS device drivers
CONFIG_PLIP=m
@@ -4125,6 +4147,7 @@ CONFIG_KEYBOARD_MCS=m
CONFIG_KEYBOARD_MPR121=m
CONFIG_KEYBOARD_NEWTON=m
CONFIG_KEYBOARD_OPENCORES=m
+CONFIG_KEYBOARD_PINEPHONE=m
CONFIG_KEYBOARD_SAMSUNG=m
CONFIG_KEYBOARD_STOWAWAY=m
CONFIG_KEYBOARD_SUNKBD=m
@@ -4364,6 +4387,7 @@ CONFIG_INPUT_PCAP=m
CONFIG_INPUT_ADXL34X=m
CONFIG_INPUT_ADXL34X_I2C=m
CONFIG_INPUT_ADXL34X_SPI=m
+CONFIG_INPUT_IBM_PANEL=m
CONFIG_INPUT_IMS_PCU=m
CONFIG_INPUT_IQS269A=m
CONFIG_INPUT_IQS626A=m
@@ -4377,6 +4401,7 @@ CONFIG_INPUT_DRV260X_HAPTICS=m
CONFIG_INPUT_DRV2665_HAPTICS=m
CONFIG_INPUT_DRV2667_HAPTICS=m
CONFIG_INPUT_RAVE_SP_PWRBUTTON=m
+CONFIG_INPUT_RT5120_PWRKEY=m
CONFIG_RMI4_CORE=m
CONFIG_RMI4_I2C=m
CONFIG_RMI4_SPI=m
@@ -4486,6 +4511,7 @@ CONFIG_SERIAL_ARC_NR_PORTS=1
CONFIG_SERIAL_RP2=m
CONFIG_SERIAL_RP2_NR_UARTS=32
CONFIG_SERIAL_FSL_LPUART=m
+CONFIG_SERIAL_FSL_LPUART_CONSOLE=y
CONFIG_SERIAL_FSL_LINFLEXUART=m
CONFIG_SERIAL_MEN_Z135=m
CONFIG_SERIAL_SPRD=m
@@ -4646,7 +4672,7 @@ CONFIG_I2C_CBUS_GPIO=m
CONFIG_I2C_DESIGNWARE_CORE=y
CONFIG_I2C_DESIGNWARE_SLAVE=y
CONFIG_I2C_DESIGNWARE_PLATFORM=y
-CONFIG_I2C_DESIGNWARE_AMDPSP=y
+# CONFIG_I2C_DESIGNWARE_AMDPSP is not set
CONFIG_I2C_DESIGNWARE_BAYTRAIL=y
CONFIG_I2C_DESIGNWARE_PCI=y
CONFIG_I2C_EMEV2=m
@@ -4665,6 +4691,7 @@ CONFIG_I2C_DIOLAN_U2C=m
CONFIG_I2C_DLN2=m
CONFIG_I2C_CP2615=m
CONFIG_I2C_PARPORT=m
+CONFIG_I2C_PCI1XXXX=m
CONFIG_I2C_ROBOTFUZZ_OSIF=m
CONFIG_I2C_TAOS_EVM=m
CONFIG_I2C_TINY_USB=m
@@ -4715,6 +4742,7 @@ CONFIG_SPI_INTEL_PCI=m
CONFIG_SPI_INTEL_PLATFORM=m
CONFIG_SPI_LM70_LLP=m
CONFIG_SPI_MICROCHIP_CORE=m
+CONFIG_SPI_MICROCHIP_CORE_QSPI=m
# CONFIG_SPI_LANTIQ_SSC is not set
CONFIG_SPI_OC_TINY=m
CONFIG_SPI_PXA2XX=m
@@ -4780,6 +4808,7 @@ CONFIG_PINCONF=y
CONFIG_GENERIC_PINCONF=y
# CONFIG_DEBUG_PINCTRL is not set
CONFIG_PINCTRL_AMD=y
+CONFIG_PINCTRL_CY8C95X0=m
CONFIG_PINCTRL_DA9062=m
CONFIG_PINCTRL_MCP23S08_I2C=m
CONFIG_PINCTRL_MCP23S08_SPI=m
@@ -4861,7 +4890,6 @@ CONFIG_GPIO_WS16C48=m
#
# I2C GPIO expanders
#
-CONFIG_GPIO_ADP5588=m
CONFIG_GPIO_MAX7300=m
CONFIG_GPIO_MAX732X=m
CONFIG_GPIO_PCA953X=m
@@ -5039,6 +5067,7 @@ CONFIG_CHARGER_MAX8997=m
CONFIG_CHARGER_MAX8998=m
CONFIG_CHARGER_MP2629=m
CONFIG_CHARGER_MT6360=m
+CONFIG_CHARGER_MT6370=m
CONFIG_CHARGER_BQ2415X=m
CONFIG_CHARGER_BQ24190=m
CONFIG_CHARGER_BQ24257=m
@@ -5095,7 +5124,6 @@ CONFIG_SENSORS_K10TEMP=m
CONFIG_SENSORS_FAM15H_POWER=m
CONFIG_SENSORS_APPLESMC=m
CONFIG_SENSORS_ASB100=m
-CONFIG_SENSORS_ASPEED=m
CONFIG_SENSORS_ATXP1=m
CONFIG_SENSORS_CORSAIR_CPRO=m
CONFIG_SENSORS_CORSAIR_PSU=m
@@ -5147,6 +5175,7 @@ CONFIG_SENSORS_MAX1668=m
CONFIG_SENSORS_MAX197=m
CONFIG_SENSORS_MAX31722=m
CONFIG_SENSORS_MAX31730=m
+CONFIG_SENSORS_MAX31760=m
CONFIG_SENSORS_MAX6620=m
CONFIG_SENSORS_MAX6621=m
CONFIG_SENSORS_MAX6639=m
@@ -5232,6 +5261,7 @@ CONFIG_SENSORS_Q54SJ108A2=m
CONFIG_SENSORS_STPDDC60=m
CONFIG_SENSORS_TPS40422=m
CONFIG_SENSORS_TPS53679=m
+CONFIG_SENSORS_TPS546D24=m
CONFIG_SENSORS_UCD9000=m
CONFIG_SENSORS_UCD9200=m
CONFIG_SENSORS_XDPE152=m
@@ -5250,6 +5280,7 @@ CONFIG_SENSORS_SY7636A=m
CONFIG_SENSORS_DME1737=m
CONFIG_SENSORS_EMC1403=m
CONFIG_SENSORS_EMC2103=m
+CONFIG_SENSORS_EMC2305=m
CONFIG_SENSORS_EMC6W201=m
CONFIG_SENSORS_SMSC47M1=m
CONFIG_SENSORS_SMSC47M192=m
@@ -5393,6 +5424,7 @@ CONFIG_ADVANTECH_WDT=m
CONFIG_ALIM1535_WDT=m
CONFIG_ALIM7101_WDT=m
CONFIG_EBC_C384_WDT=m
+CONFIG_EXAR_WDT=m
CONFIG_F71808E_WDT=m
CONFIG_SP5100_TCO=m
CONFIG_SBC_FITPC2_WATCHDOG=m
@@ -5526,8 +5558,10 @@ CONFIG_MFD_MAX8925=y
CONFIG_MFD_MAX8997=y
CONFIG_MFD_MAX8998=y
CONFIG_MFD_MT6360=m
+CONFIG_MFD_MT6370=m
CONFIG_MFD_MT6397=m
CONFIG_MFD_MENF21BMC=m
+CONFIG_MFD_OCELOT=m
CONFIG_EZX_PCAP=y
CONFIG_MFD_VIPERBOARD=m
CONFIG_MFD_RETU=m
@@ -5535,9 +5569,11 @@ CONFIG_MFD_PCF50633=m
CONFIG_PCF50633_ADC=m
CONFIG_PCF50633_GPIO=m
CONFIG_UCB1400_CORE=m
+CONFIG_MFD_SY7636A=m
CONFIG_MFD_RDC321X=m
CONFIG_MFD_RT4831=m
CONFIG_MFD_RT5033=m
+CONFIG_MFD_RT5120=m
CONFIG_MFD_RC5T583=y
CONFIG_MFD_SI476X_CORE=m
CONFIG_MFD_SIMPLE_MFD_I2C=m
@@ -5645,9 +5681,12 @@ CONFIG_REGULATOR_MC13892=m
CONFIG_REGULATOR_MP8859=m
CONFIG_REGULATOR_MT6311=m
CONFIG_REGULATOR_MT6323=m
+CONFIG_REGULATOR_MT6331=m
+CONFIG_REGULATOR_MT6332=m
CONFIG_REGULATOR_MT6358=m
CONFIG_REGULATOR_MT6359=m
CONFIG_REGULATOR_MT6360=m
+CONFIG_REGULATOR_MT6370=m
CONFIG_REGULATOR_MT6397=m
CONFIG_REGULATOR_PALMAS=m
CONFIG_REGULATOR_PCA9450=m
@@ -5661,6 +5700,7 @@ CONFIG_REGULATOR_RC5T583=m
CONFIG_REGULATOR_RT4801=m
CONFIG_REGULATOR_RT4831=m
CONFIG_REGULATOR_RT5033=m
+CONFIG_REGULATOR_RT5120=m
CONFIG_REGULATOR_RT5190A=m
CONFIG_REGULATOR_RT5759=m
CONFIG_REGULATOR_RT6160=m
@@ -5815,7 +5855,6 @@ CONFIG_MEDIA_USB_SUPPORT=y
#
# Webcam devices
#
-CONFIG_VIDEO_CPIA2=m
CONFIG_USB_GSPCA=m
CONFIG_USB_GSPCA_BENQ=m
CONFIG_USB_GSPCA_CONEX=m
@@ -5873,7 +5912,6 @@ CONFIG_USB_S2255=m
CONFIG_VIDEO_USBTV=m
CONFIG_USB_VIDEO_CLASS=m
CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y
-CONFIG_USB_ZR364XX=m
#
# Analog TV USB devices
@@ -5900,9 +5938,6 @@ CONFIG_VIDEO_CX231XX=m
CONFIG_VIDEO_CX231XX_RC=y
CONFIG_VIDEO_CX231XX_ALSA=m
CONFIG_VIDEO_CX231XX_DVB=m
-CONFIG_VIDEO_TM6000=m
-CONFIG_VIDEO_TM6000_ALSA=m
-CONFIG_VIDEO_TM6000_DVB=m
#
# Digital TV USB devices
@@ -5969,11 +6004,11 @@ CONFIG_MEDIA_PCI_SUPPORT=y
#
# Media capture support
#
-CONFIG_VIDEO_MEYE=m
CONFIG_VIDEO_SOLO6X10=m
CONFIG_VIDEO_TW5864=m
CONFIG_VIDEO_TW68=m
CONFIG_VIDEO_TW686X=m
+# CONFIG_VIDEO_ZORAN is not set
#
# Media capture/analog TV support
@@ -5982,9 +6017,6 @@ CONFIG_VIDEO_DT3155=m
CONFIG_VIDEO_IVTV=m
CONFIG_VIDEO_IVTV_ALSA=m
# CONFIG_VIDEO_FB_IVTV is not set
-CONFIG_VIDEO_HEXIUM_GEMINI=m
-CONFIG_VIDEO_HEXIUM_ORION=m
-CONFIG_VIDEO_MXB=m
#
# Media capture/analog/hybrid TV support
@@ -6028,10 +6060,6 @@ CONFIG_DVB_PLUTO2=m
CONFIG_DVB_PT1=m
CONFIG_DVB_PT3=m
CONFIG_DVB_SMIPCIE=m
-CONFIG_DVB_BUDGET_CORE=m
-CONFIG_DVB_BUDGET=m
-CONFIG_DVB_BUDGET_CI=m
-CONFIG_DVB_BUDGET_AV=m
CONFIG_VIDEO_IPU3_CIO2=m
CONFIG_CIO2_BRIDGE=y
CONFIG_RADIO_ADAPTERS=m
@@ -6144,6 +6172,10 @@ CONFIG_VIDEO_CAFE_CCIC=m
#
#
+# Verisilicon media platform drivers
+#
+
+#
# VIA media platform drivers
#
@@ -6180,8 +6212,6 @@ CONFIG_TTPCI_EEPROM=m
CONFIG_VIDEO_CX2341X=m
CONFIG_VIDEO_TVEEPROM=m
CONFIG_DVB_B2C2_FLEXCOP=m
-CONFIG_VIDEO_SAA7146=m
-CONFIG_VIDEO_SAA7146_VV=m
CONFIG_SMS_SIANO_MDTV=m
CONFIG_SMS_SIANO_RC=y
# CONFIG_SMS_SIANO_DEBUGFS is not set
@@ -6303,9 +6333,9 @@ CONFIG_VIDEO_MSP3400=m
CONFIG_VIDEO_SONY_BTF_MPX=m
# CONFIG_VIDEO_TDA1997X is not set
CONFIG_VIDEO_TDA7432=m
-CONFIG_VIDEO_TDA9840=m
-CONFIG_VIDEO_TEA6415C=m
-CONFIG_VIDEO_TEA6420=m
+# CONFIG_VIDEO_TDA9840 is not set
+# CONFIG_VIDEO_TEA6415C is not set
+# CONFIG_VIDEO_TEA6420 is not set
# CONFIG_VIDEO_TLV320AIC23B is not set
CONFIG_VIDEO_TVAUDIO=m
CONFIG_VIDEO_UDA1342=m
@@ -6490,13 +6520,13 @@ CONFIG_DVB_STV6110=m
CONFIG_DVB_TDA10071=m
CONFIG_DVB_TDA10086=m
CONFIG_DVB_TDA8083=m
-CONFIG_DVB_TDA8261=m
+# CONFIG_DVB_TDA8261 is not set
CONFIG_DVB_TDA826X=m
CONFIG_DVB_TS2020=m
-CONFIG_DVB_TUA6100=m
+# CONFIG_DVB_TUA6100 is not set
CONFIG_DVB_TUNER_CX24113=m
CONFIG_DVB_TUNER_ITD1000=m
-CONFIG_DVB_VES1X93=m
+# CONFIG_DVB_VES1X93 is not set
CONFIG_DVB_ZL10036=m
CONFIG_DVB_ZL10039=m
@@ -6517,7 +6547,7 @@ CONFIG_DVB_DIB7000P=m
CONFIG_DVB_DRXD=m
CONFIG_DVB_EC100=m
CONFIG_DVB_GP8PSK_FE=m
-CONFIG_DVB_L64781=m
+# CONFIG_DVB_L64781 is not set
CONFIG_DVB_MT352=m
CONFIG_DVB_NXT6000=m
CONFIG_DVB_RTL2830=m
@@ -6629,7 +6659,7 @@ CONFIG_DRM=y
CONFIG_DRM_MIPI_DBI=m
CONFIG_DRM_MIPI_DSI=y
# CONFIG_DRM_DEBUG_MM is not set
-# CONFIG_DRM_DEBUG_SELFTEST is not set
+CONFIG_DRM_USE_DYNAMIC_DEBUG=y
CONFIG_DRM_KMS_HELPER=y
# CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS is not set
# CONFIG_DRM_DEBUG_MODESET_LOCK is not set
@@ -6647,7 +6677,7 @@ CONFIG_DRM_TTM=m
CONFIG_DRM_BUDDY=m
CONFIG_DRM_VRAM_HELPER=m
CONFIG_DRM_TTM_HELPER=m
-CONFIG_DRM_GEM_CMA_HELPER=m
+CONFIG_DRM_GEM_DMA_HELPER=m
CONFIG_DRM_GEM_SHMEM_HELPER=y
CONFIG_DRM_SCHED=m
@@ -6887,6 +6917,7 @@ CONFIG_BACKLIGHT_PWM=m
CONFIG_BACKLIGHT_DA903X=m
CONFIG_BACKLIGHT_DA9052=m
CONFIG_BACKLIGHT_MAX8925=m
+CONFIG_BACKLIGHT_MT6370=m
CONFIG_BACKLIGHT_APPLE=m
CONFIG_BACKLIGHT_QCOM_WLED=m
CONFIG_BACKLIGHT_RT4831=m
@@ -7176,6 +7207,8 @@ CONFIG_SND_SOC_AMD_MACH_COMMON=m
CONFIG_SND_SOC_AMD_LEGACY_MACH=m
CONFIG_SND_SOC_AMD_SOF_MACH=m
CONFIG_SND_SOC_AMD_RPL_ACP6x=m
+CONFIG_SND_SOC_AMD_PS=m
+CONFIG_SND_SOC_AMD_PS_MACH=m
CONFIG_SND_ATMEL_SOC=m
# CONFIG_SND_BCM63XX_I2S_WHISTLER is not set
CONFIG_SND_DESIGNWARE_I2S=m
@@ -7315,6 +7348,7 @@ CONFIG_SND_SOC_SOF_INTEL_IPC4=y
CONFIG_SND_SOC_SOF_AMD_TOPLEVEL=m
CONFIG_SND_SOC_SOF_AMD_COMMON=m
CONFIG_SND_SOC_SOF_AMD_RENOIR=m
+CONFIG_SND_SOC_SOF_AMD_REMBRANDT=m
CONFIG_SND_SOC_SOF_INTEL_TOPLEVEL=y
CONFIG_SND_SOC_SOF_INTEL_HIFI_EP_IPC=m
CONFIG_SND_SOC_SOF_INTEL_ATOM_HIFI_EP=m
@@ -7322,6 +7356,8 @@ CONFIG_SND_SOC_SOF_INTEL_COMMON=m
CONFIG_SND_SOC_SOF_BAYTRAIL=m
# CONFIG_SND_SOC_SOF_BROADWELL is not set
CONFIG_SND_SOC_SOF_MERRIFIELD=m
+# CONFIG_SND_SOC_SOF_SKYLAKE is not set
+# CONFIG_SND_SOC_SOF_KABYLAKE is not set
CONFIG_SND_SOC_SOF_INTEL_APL=m
CONFIG_SND_SOC_SOF_APOLLOLAKE=m
CONFIG_SND_SOC_SOF_GEMINILAKE=m
@@ -7405,12 +7441,14 @@ CONFIG_SND_SOC_CS35L45_TABLES=m
CONFIG_SND_SOC_CS35L45=m
CONFIG_SND_SOC_CS35L45_SPI=m
CONFIG_SND_SOC_CS35L45_I2C=m
+CONFIG_SND_SOC_CS42L42_CORE=m
CONFIG_SND_SOC_CS42L42=m
CONFIG_SND_SOC_CS42L51=m
CONFIG_SND_SOC_CS42L51_I2C=m
CONFIG_SND_SOC_CS42L52=m
CONFIG_SND_SOC_CS42L56=m
CONFIG_SND_SOC_CS42L73=m
+CONFIG_SND_SOC_CS42L83=m
CONFIG_SND_SOC_CS4234=m
CONFIG_SND_SOC_CS4265=m
CONFIG_SND_SOC_CS4270=m
@@ -7431,6 +7469,7 @@ CONFIG_SND_SOC_HDMI_CODEC=m
CONFIG_SND_SOC_ES7134=m
CONFIG_SND_SOC_ES7241=m
CONFIG_SND_SOC_ES8316=m
+CONFIG_SND_SOC_ES8326=m
CONFIG_SND_SOC_ES8328=m
CONFIG_SND_SOC_ES8328_I2C=m
CONFIG_SND_SOC_ES8328_SPI=m
@@ -7521,6 +7560,8 @@ CONFIG_SND_SOC_SIGMADSP_REGMAP=m
CONFIG_SND_SOC_SIMPLE_AMPLIFIER=m
CONFIG_SND_SOC_SIMPLE_MUX=m
CONFIG_SND_SOC_SPDIF=m
+CONFIG_SND_SOC_SRC4XXX_I2C=m
+CONFIG_SND_SOC_SRC4XXX=m
CONFIG_SND_SOC_SSM2305=m
CONFIG_SND_SOC_SSM2518=m
CONFIG_SND_SOC_SSM2602=m
@@ -7678,6 +7719,7 @@ CONFIG_HID_KYE=m
CONFIG_HID_UCLOGIC=m
CONFIG_HID_WALTOP=m
CONFIG_HID_VIEWSONIC=m
+CONFIG_HID_VRC2=m
CONFIG_HID_XIAOMI=m
CONFIG_HID_GYRATION=m
CONFIG_HID_ICADE=m
@@ -7722,6 +7764,7 @@ CONFIG_HID_PICOLCD_CIR=y
CONFIG_HID_PLANTRONICS=m
CONFIG_HID_PLAYSTATION=m
CONFIG_PLAYSTATION_FF=y
+CONFIG_HID_PXRC=m
CONFIG_HID_RAZER=m
CONFIG_HID_PRIMAX=m
CONFIG_HID_RETRODE=m
@@ -8190,6 +8233,7 @@ CONFIG_TYPEC_TCPM=m
CONFIG_TYPEC_TCPCI=m
CONFIG_TYPEC_RT1711H=m
CONFIG_TYPEC_MT6360=m
+CONFIG_TYPEC_TCPCI_MT6370=m
CONFIG_TYPEC_TCPCI_MAXIM=m
CONFIG_TYPEC_FUSB302=m
CONFIG_TYPEC_WCOVE=m
@@ -8821,12 +8865,6 @@ CONFIG_ADT7316_I2C=m
# end of Analog digital bi-direction converters
#
-# Capacitance to digital converters
-#
-CONFIG_AD7746=m
-# end of Capacitance to digital converters
-
-#
# Direct Digital Synthesis
#
CONFIG_AD9832=m
@@ -8867,18 +8905,9 @@ CONFIG_VIDEO_ATOMISP_GC0310=m
CONFIG_VIDEO_ATOMISP_OV2680=m
CONFIG_VIDEO_ATOMISP_OV5693=m
CONFIG_VIDEO_ATOMISP_LM3554=m
-CONFIG_DVB_AV7110_IR=y
-CONFIG_DVB_AV7110=m
-CONFIG_DVB_AV7110_OSD=y
-CONFIG_DVB_BUDGET_PATCH=m
-CONFIG_DVB_SP8870=m
CONFIG_VIDEO_IPU3_IMGU=m
-CONFIG_VIDEO_STKWEBCAM=m
-# CONFIG_VIDEO_ZORAN is not set
+# CONFIG_STAGING_MEDIA_DEPRECATED is not set
CONFIG_LTE_GDM724X=m
-CONFIG_FIREWIRE_SERIAL=m
-CONFIG_FWTTY_MAX_TOTAL_PORTS=64
-CONFIG_FWTTY_MAX_CARD_PORTS=32
# CONFIG_FB_TFT is not set
CONFIG_MOST_COMPONENTS=m
CONFIG_MOST_NET=m
@@ -8910,6 +8939,7 @@ CONFIG_CROS_EC_TYPEC=m
CONFIG_CROS_USBPD_LOGGER=m
CONFIG_CROS_USBPD_NOTIFY=m
CONFIG_CHROMEOS_PRIVACY_SCREEN=m
+CONFIG_CROS_TYPEC_SWITCH=m
CONFIG_WILCO_EC=m
CONFIG_WILCO_EC_DEBUGFS=m
CONFIG_WILCO_EC_EVENTS=m
@@ -8948,6 +8978,7 @@ CONFIG_YOGABOOK_WMI=m
CONFIG_ACERHDF=m
CONFIG_ACER_WIRELESS=m
CONFIG_ACER_WMI=m
+CONFIG_AMD_PMF=m
CONFIG_AMD_PMC=m
CONFIG_AMD_HSMP=m
CONFIG_ADV_SWBUTTON=m
@@ -9310,6 +9341,7 @@ CONFIG_MMA8452=m
CONFIG_MMA9551_CORE=m
CONFIG_MMA9551=m
CONFIG_MMA9553=m
+CONFIG_MSA311=m
CONFIG_MXC4005=m
CONFIG_MXC6255=m
CONFIG_SCA3000=m
@@ -9361,6 +9393,7 @@ CONFIG_LTC2497=m
CONFIG_MAX1027=m
CONFIG_MAX11100=m
CONFIG_MAX1118=m
+CONFIG_MAX11205=m
CONFIG_MAX1241=m
CONFIG_MAX1363=m
CONFIG_MAX9611=m
@@ -9372,6 +9405,7 @@ CONFIG_MEN_Z188_ADC=m
CONFIG_MP2629_ADC=m
CONFIG_NAU7802=m
CONFIG_PALMAS_GPADC=m
+CONFIG_RICHTEK_RTQ6056=m
CONFIG_SD_ADC_MODULATOR=m
CONFIG_TI_ADC081C=m
CONFIG_TI_ADC0832=m
@@ -9420,6 +9454,7 @@ CONFIG_HMC425=m
# Capacitance to digital converters
#
CONFIG_AD7150=m
+CONFIG_AD7746=m
# end of Capacitance to digital converters
#
@@ -9618,6 +9653,9 @@ CONFIG_ADIS16480=m
CONFIG_BMI160=m
CONFIG_BMI160_I2C=m
CONFIG_BMI160_SPI=m
+CONFIG_BOSCH_BNO055=m
+CONFIG_BOSCH_BNO055_SERIAL=m
+CONFIG_BOSCH_BNO055_I2C=m
CONFIG_FXOS8700=m
CONFIG_FXOS8700_I2C=m
CONFIG_FXOS8700_SPI=m
@@ -9670,6 +9708,7 @@ CONFIG_JSA1212=m
CONFIG_RPR0521=m
CONFIG_SENSORS_LM3533=m
CONFIG_LTR501=m
+CONFIG_LTRF216A=m
CONFIG_LV0104CS=m
CONFIG_MAX44000=m
CONFIG_MAX44009=m
@@ -9893,6 +9932,7 @@ CONFIG_IPACK_BUS=m
CONFIG_BOARD_TPCI200=m
CONFIG_SERIAL_IPOCTAL=m
CONFIG_RESET_CONTROLLER=y
+# CONFIG_RESET_SIMPLE is not set
CONFIG_RESET_TI_SYSCON=m
CONFIG_RESET_TI_TPS380X=m
@@ -9961,7 +10001,7 @@ CONFIG_DEV_DAX_HMEM_DEVICES=y
CONFIG_DEV_DAX_KMEM=m
CONFIG_NVMEM=y
CONFIG_NVMEM_SYSFS=y
-CONFIG_RAVE_SP_EEPROM=m
+CONFIG_NVMEM_RAVE_SP_EEPROM=m
CONFIG_NVMEM_RMEM=m
#
@@ -10575,6 +10615,7 @@ CONFIG_CRYPTO_AUTHENC=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_SIMD=m
CONFIG_CRYPTO_ENGINE=m
+# end of Crypto core or helper
#
# Public-key cryptography
@@ -10588,121 +10629,95 @@ CONFIG_CRYPTO_ECDSA=y
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
-CONFIG_CRYPTO_CURVE25519_X86=m
+# end of Public-key cryptography
#
-# Authenticated Encryption with Associated Data
+# Block ciphers
#
-CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
-CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_AEGIS128=m
-CONFIG_CRYPTO_AEGIS128_AESNI_SSE2=m
-CONFIG_CRYPTO_SEQIV=m
-CONFIG_CRYPTO_ECHAINIV=m
+CONFIG_CRYPTO_AES=y
+CONFIG_CRYPTO_AES_TI=m
+CONFIG_CRYPTO_ARIA=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_BLOWFISH_COMMON=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST_COMMON=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_DES=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_SM4=m
+CONFIG_CRYPTO_SM4_GENERIC=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_TWOFISH_COMMON=m
+# end of Block ciphers
#
-# Block modes
+# Length-preserving ciphers and modes
#
+CONFIG_CRYPTO_ADIANTUM=m
+CONFIG_CRYPTO_CHACHA20=m
CONFIG_CRYPTO_CBC=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_CTR=y
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_ECB=m
+CONFIG_CRYPTO_HCTR2=m
+CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XCTR=m
CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_NHPOLY1305=m
-CONFIG_CRYPTO_NHPOLY1305_SSE2=m
-CONFIG_CRYPTO_NHPOLY1305_AVX2=m
-CONFIG_CRYPTO_ADIANTUM=m
-CONFIG_CRYPTO_HCTR2=m
-CONFIG_CRYPTO_ESSIV=m
+# end of Length-preserving ciphers and modes
#
-# Hash modes
+# AEAD (authenticated encryption with associated data) ciphers
#
-CONFIG_CRYPTO_CMAC=m
-CONFIG_CRYPTO_HMAC=y
-CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_CCM=m
+CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_SEQIV=m
+CONFIG_CRYPTO_ECHAINIV=m
+CONFIG_CRYPTO_ESSIV=m
+# end of AEAD (authenticated encryption with associated data) ciphers
#
-# Digest
+# Hashes, digests, and MACs
#
-CONFIG_CRYPTO_CRC32C=m
-CONFIG_CRYPTO_CRC32C_INTEL=m
-CONFIG_CRYPTO_CRC32=m
-CONFIG_CRYPTO_CRC32_PCLMUL=m
-CONFIG_CRYPTO_XXHASH=m
CONFIG_CRYPTO_BLAKE2B=m
-CONFIG_CRYPTO_BLAKE2S_X86=y
-CONFIG_CRYPTO_CRCT10DIF=y
-CONFIG_CRYPTO_CRCT10DIF_PCLMUL=m
-CONFIG_CRYPTO_CRC64_ROCKSOFT=y
+CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_GHASH=m
-CONFIG_CRYPTO_POLYVAL=m
-CONFIG_CRYPTO_POLYVAL_CLMUL_NI=m
-CONFIG_CRYPTO_POLY1305=m
-CONFIG_CRYPTO_POLY1305_X86_64=m
+CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_POLYVAL=m
+CONFIG_CRYPTO_POLY1305=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SHA1=y
-CONFIG_CRYPTO_SHA1_SSSE3=m
-CONFIG_CRYPTO_SHA256_SSSE3=m
-CONFIG_CRYPTO_SHA512_SSSE3=m
CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_SHA512=y
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_SM3_GENERIC=m
-CONFIG_CRYPTO_SM3_AVX_X86_64=m
CONFIG_CRYPTO_STREEBOG=m
+CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=m
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_XXHASH=m
+# end of Hashes, digests, and MACs
#
-# Ciphers
+# CRCs (cyclic redundancy checks)
#
-CONFIG_CRYPTO_AES=y
-CONFIG_CRYPTO_AES_TI=m
-CONFIG_CRYPTO_AES_NI_INTEL=m
-CONFIG_CRYPTO_BLOWFISH=m
-CONFIG_CRYPTO_BLOWFISH_COMMON=m
-CONFIG_CRYPTO_BLOWFISH_X86_64=m
-CONFIG_CRYPTO_CAMELLIA=m
-CONFIG_CRYPTO_CAMELLIA_X86_64=m
-CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64=m
-CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64=m
-CONFIG_CRYPTO_CAST_COMMON=m
-CONFIG_CRYPTO_CAST5=m
-CONFIG_CRYPTO_CAST5_AVX_X86_64=m
-CONFIG_CRYPTO_CAST6=m
-CONFIG_CRYPTO_CAST6_AVX_X86_64=m
-CONFIG_CRYPTO_DES=m
-CONFIG_CRYPTO_DES3_EDE_X86_64=m
-CONFIG_CRYPTO_FCRYPT=m
-CONFIG_CRYPTO_CHACHA20=m
-CONFIG_CRYPTO_CHACHA20_X86_64=m
-CONFIG_CRYPTO_ARIA=m
-CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_SERPENT_SSE2_X86_64=m
-CONFIG_CRYPTO_SERPENT_AVX_X86_64=m
-CONFIG_CRYPTO_SERPENT_AVX2_X86_64=m
-CONFIG_CRYPTO_SM4=m
-CONFIG_CRYPTO_SM4_GENERIC=m
-CONFIG_CRYPTO_SM4_AESNI_AVX_X86_64=m
-CONFIG_CRYPTO_SM4_AESNI_AVX2_X86_64=m
-CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_TWOFISH_COMMON=m
-CONFIG_CRYPTO_TWOFISH_X86_64=m
-CONFIG_CRYPTO_TWOFISH_X86_64_3WAY=m
-CONFIG_CRYPTO_TWOFISH_AVX_X86_64=m
+CONFIG_CRYPTO_CRC32C=m
+CONFIG_CRYPTO_CRC32=m
+CONFIG_CRYPTO_CRCT10DIF=y
+CONFIG_CRYPTO_CRC64_ROCKSOFT=y
+# end of CRCs (cyclic redundancy checks)
#
# Compression
@@ -10713,9 +10728,10 @@ CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=y
CONFIG_CRYPTO_LZ4HC=m
CONFIG_CRYPTO_ZSTD=y
+# end of Compression
#
-# Random Number Generation
+# Random number generation
#
CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_DRBG_MENU=y
@@ -10725,6 +10741,11 @@ CONFIG_CRYPTO_DRBG_CTR=y
CONFIG_CRYPTO_DRBG=y
CONFIG_CRYPTO_JITTERENTROPY=y
CONFIG_CRYPTO_KDF800108_CTR=y
+# end of Random number generation
+
+#
+# Userspace interface
+#
CONFIG_CRYPTO_USER_API=m
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
@@ -10733,7 +10754,48 @@ CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE is not set
CONFIG_CRYPTO_STATS=y
+# end of Userspace interface
+
CONFIG_CRYPTO_HASH_INFO=y
+
+#
+# Accelerated Cryptographic Algorithms for CPU (x86)
+#
+CONFIG_CRYPTO_CURVE25519_X86=m
+CONFIG_CRYPTO_AES_NI_INTEL=m
+CONFIG_CRYPTO_BLOWFISH_X86_64=m
+CONFIG_CRYPTO_CAMELLIA_X86_64=m
+CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64=m
+CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64=m
+CONFIG_CRYPTO_CAST5_AVX_X86_64=m
+CONFIG_CRYPTO_CAST6_AVX_X86_64=m
+CONFIG_CRYPTO_DES3_EDE_X86_64=m
+CONFIG_CRYPTO_SERPENT_SSE2_X86_64=m
+CONFIG_CRYPTO_SERPENT_AVX_X86_64=m
+CONFIG_CRYPTO_SERPENT_AVX2_X86_64=m
+CONFIG_CRYPTO_SM4_AESNI_AVX_X86_64=m
+CONFIG_CRYPTO_SM4_AESNI_AVX2_X86_64=m
+CONFIG_CRYPTO_TWOFISH_X86_64=m
+CONFIG_CRYPTO_TWOFISH_X86_64_3WAY=m
+CONFIG_CRYPTO_TWOFISH_AVX_X86_64=m
+CONFIG_CRYPTO_ARIA_AESNI_AVX_X86_64=m
+CONFIG_CRYPTO_CHACHA20_X86_64=m
+CONFIG_CRYPTO_AEGIS128_AESNI_SSE2=m
+CONFIG_CRYPTO_NHPOLY1305_SSE2=m
+CONFIG_CRYPTO_NHPOLY1305_AVX2=m
+CONFIG_CRYPTO_BLAKE2S_X86=y
+CONFIG_CRYPTO_POLYVAL_CLMUL_NI=m
+CONFIG_CRYPTO_POLY1305_X86_64=m
+CONFIG_CRYPTO_SHA1_SSSE3=m
+CONFIG_CRYPTO_SHA256_SSSE3=m
+CONFIG_CRYPTO_SHA512_SSSE3=m
+CONFIG_CRYPTO_SM3_AVX_X86_64=m
+CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=m
+CONFIG_CRYPTO_CRC32C_INTEL=m
+CONFIG_CRYPTO_CRC32_PCLMUL=m
+CONFIG_CRYPTO_CRCT10DIF_PCLMUL=m
+# end of Accelerated Cryptographic Algorithms for CPU (x86)
+
CONFIG_CRYPTO_HW=y
CONFIG_CRYPTO_DEV_PADLOCK=m
CONFIG_CRYPTO_DEV_PADLOCK_AES=m
@@ -10815,6 +10877,7 @@ CONFIG_ARCH_USE_SYM_ANNOTATIONS=y
#
# Crypto library routines
#
+CONFIG_CRYPTO_LIB_UTILS=y
CONFIG_CRYPTO_LIB_AES=y
CONFIG_CRYPTO_LIB_ARC4=m
CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S=y
@@ -10835,7 +10898,6 @@ CONFIG_CRYPTO_LIB_SHA1=y
CONFIG_CRYPTO_LIB_SHA256=y
# end of Crypto library routines
-CONFIG_LIB_MEMNEQ=y
CONFIG_CRC_CCITT=y
CONFIG_CRC16=m
CONFIG_CRC_T10DIF=y
@@ -10863,6 +10925,7 @@ CONFIG_LZO_DECOMPRESS=y
CONFIG_LZ4_COMPRESS=y
CONFIG_LZ4HC_COMPRESS=m
CONFIG_LZ4_DECOMPRESS=y
+CONFIG_ZSTD_COMMON=y
CONFIG_ZSTD_COMPRESS=y
CONFIG_ZSTD_DECOMPRESS=y
CONFIG_XZ_DEC=y
@@ -10921,6 +10984,7 @@ CONFIG_CMA_ALIGNMENT=8
# CONFIG_DMA_MAP_BENCHMARK is not set
CONFIG_SGL_ALLOC=y
CONFIG_CHECK_SIGNATURE=y
+# CONFIG_FORCE_NR_CPUS is not set
CONFIG_CPU_RMAP=y
CONFIG_DQL=y
CONFIG_GLOB=y
@@ -10993,6 +11057,7 @@ CONFIG_DEBUG_KERNEL=y
#
# Compile-time checks and compiler options
#
+CONFIG_AS_HAS_NON_CONST_LEB128=y
CONFIG_DEBUG_INFO_NONE=y
# CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT is not set
# CONFIG_DEBUG_INFO_DWARF4 is not set
@@ -11081,6 +11146,7 @@ CONFIG_KFENCE_NUM_OBJECTS=255
CONFIG_KFENCE_DEFERRABLE=y
# CONFIG_KFENCE_STATIC_KEYS is not set
CONFIG_KFENCE_STRESS_TEST_FAULTS=0
+CONFIG_HAVE_ARCH_KMSAN=y
# end of Memory Debugging
CONFIG_DEBUG_SHIRQ=y
@@ -11149,6 +11215,7 @@ CONFIG_DEBUG_LIST=y
# CONFIG_DEBUG_SG is not set
# CONFIG_DEBUG_NOTIFIERS is not set
# CONFIG_BUG_ON_DATA_CORRUPTION is not set
+# CONFIG_DEBUG_MAPLE_TREE is not set
# end of Debug kernel data structures
# CONFIG_DEBUG_CREDENTIALS is not set
@@ -11178,6 +11245,7 @@ CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y
CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y
CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS=y
+CONFIG_HAVE_DYNAMIC_FTRACE_NO_PATCHABLE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_HAVE_FENTRY=y
@@ -11276,7 +11344,6 @@ CONFIG_DEBUG_BOOT_PARAMS=y
# CONFIG_PUNIT_ATOM_DEBUG is not set
CONFIG_UNWINDER_ORC=y
# CONFIG_UNWINDER_FRAME_POINTER is not set
-# CONFIG_UNWINDER_GUESS is not set
# end of x86 Debugging
#
@@ -11311,6 +11378,7 @@ CONFIG_ASYNC_RAID6_TEST=m
# CONFIG_TEST_BITMAP is not set
# CONFIG_TEST_UUID is not set
# CONFIG_TEST_XARRAY is not set
+# CONFIG_TEST_MAPLE_TREE is not set
# CONFIG_TEST_RHASHTABLE is not set
# CONFIG_TEST_SIPHASH is not set
# CONFIG_TEST_IDA is not set
@@ -11326,6 +11394,7 @@ CONFIG_ASYNC_RAID6_TEST=m
# CONFIG_TEST_SYSCTL is not set
# CONFIG_TEST_UDELAY is not set
# CONFIG_TEST_STATIC_KEYS is not set
+# CONFIG_TEST_DYNAMIC_DEBUG is not set
# CONFIG_TEST_KMOD is not set
# CONFIG_TEST_MEMCAT_P is not set
# CONFIG_TEST_OBJAGG is not set
@@ -11338,4 +11407,9 @@ CONFIG_ARCH_USE_MEMTEST=y
# CONFIG_MEMTEST is not set
# CONFIG_HYPERV_TESTING is not set
# end of Kernel Testing and Coverage
+
+#
+# Rust hacking
+#
+# end of Rust hacking
# end of Kernel hacking