author     jc_gargma <jc_gargma@iserlohn-fortress.net>  2023-02-06 06:03:59 -0800
committer  jc_gargma <jc_gargma@iserlohn-fortress.net>  2023-02-06 06:03:59 -0800
commit     14ed2df7c719eb1c051fd86b2c529a92c02f3878 (patch)
tree       5f79b3045e26d81a56f6a6d1fbd4dc419e957f96
parent     Updated to 6.1.7 (diff)
download   linux-14ed2df7c719eb1c051fd86b2c529a92c02f3878.tar.xz
Updated to 6.1.10
-rw-r--r--  0002-Revert-drm-display-dp_mst-Move-all-payload-info-into.patch  2479
-rw-r--r--  0003-btrfs-fix-invalid-leaf-access-due-to-inline-extent-d.patch    67
-rw-r--r--  0004-wifi-mac80211-fix-initialization-of-rx-link-and-rx-l.patch   459
-rw-r--r--  PKGBUILD                                                           18
4 files changed, 3 insertions, 3020 deletions
diff --git a/0002-Revert-drm-display-dp_mst-Move-all-payload-info-into.patch b/0002-Revert-drm-display-dp_mst-Move-all-payload-info-into.patch
deleted file mode 100644
index 3335f6a..0000000
--- a/0002-Revert-drm-display-dp_mst-Move-all-payload-info-into.patch
+++ /dev/null
@@ -1,2479 +0,0 @@
-From 96c2a11329533bb221e884bc8b91c6fc2f305dd8 Mon Sep 17 00:00:00 2001
-From: Wayne Lin <Wayne.Lin@amd.com>
-Date: Thu, 12 Jan 2023 16:50:44 +0800
-Subject: [PATCH 2/5] Revert "drm/display/dp_mst: Move all payload info into
- the atomic state"
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-This reverts commit 4d07b0bc403403438d9cf88450506240c5faf92f.
-
-[Why]
-Changes cause regression on amdgpu mst.
-E.g.
-In fill_dc_mst_payload_table_from_drm(), amdgpu expects to add/remove payloads
-one by one and call fill_dc_mst_payload_table_from_drm() to update the
-HW-maintained payload table. But the previous change tries to go through all
-the payloads in mst_state and update the amdgpu HW-maintained table at once
-every time the driver only tries to add or remove a specific payload stream.
-The new design conflicts with the current implementation in amdgpu.
-
-[How]
-Revert this patch first. After addressing all regression problems caused by
-this previous patch, we will add it back and adjust it.
-
-Signed-off-by: Wayne Lin <Wayne.Lin@amd.com>
-Link: https://gitlab.freedesktop.org/drm/amd/-/issues/2171
-Cc: stable@vger.kernel.org # 6.1
-Cc: Lyude Paul <lyude@redhat.com>
-Cc: Harry Wentland <harry.wentland@amd.com>
-Cc: Mario Limonciello <mario.limonciello@amd.com>
-Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
-Cc: Ben Skeggs <bskeggs@redhat.com>
-Cc: Stanislav Lisovskiy <stanislav.lisovskiy@intel.com>
-Cc: Fangzhi Zuo <Jerry.Zuo@amd.com>
-Cherry-picked-for: https://bugs.archlinux.org/task/76934
----
- .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 53 +-
- .../amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 104 ++-
- .../display/amdgpu_dm/amdgpu_dm_mst_types.c | 87 ++-
- .../amd/display/include/link_service_types.h | 3 -
- drivers/gpu/drm/display/drm_dp_mst_topology.c | 724 ++++++++++++------
- drivers/gpu/drm/i915/display/intel_dp_mst.c | 64 +-
- drivers/gpu/drm/i915/display/intel_hdcp.c | 24 +-
- drivers/gpu/drm/nouveau/dispnv50/disp.c | 169 ++--
- include/drm/display/drm_dp_mst_helper.h | 178 +++--
- 9 files changed, 876 insertions(+), 530 deletions(-)
-
-diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
-index dacad8b85963..40defd664b49 100644
---- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
-+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
-@@ -6460,7 +6460,6 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
- const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
- struct drm_dp_mst_topology_mgr *mst_mgr;
- struct drm_dp_mst_port *mst_port;
-- struct drm_dp_mst_topology_state *mst_state;
- enum dc_color_depth color_depth;
- int clock, bpp = 0;
- bool is_y420 = false;
-@@ -6474,13 +6473,6 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
- if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
- return 0;
-
-- mst_state = drm_atomic_get_mst_topology_state(state, mst_mgr);
-- if (IS_ERR(mst_state))
-- return PTR_ERR(mst_state);
--
-- if (!mst_state->pbn_div)
-- mst_state->pbn_div = dm_mst_get_pbn_divider(aconnector->mst_port->dc_link);
--
- if (!state->duplicated) {
- int max_bpc = conn_state->max_requested_bpc;
- is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
-@@ -6492,10 +6484,11 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
- clock = adjusted_mode->clock;
- dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
- }
--
-- dm_new_connector_state->vcpi_slots =
-- drm_dp_atomic_find_time_slots(state, mst_mgr, mst_port,
-- dm_new_connector_state->pbn);
-+ dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_time_slots(state,
-+ mst_mgr,
-+ mst_port,
-+ dm_new_connector_state->pbn,
-+ dm_mst_get_pbn_divider(aconnector->dc_link));
- if (dm_new_connector_state->vcpi_slots < 0) {
- DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
- return dm_new_connector_state->vcpi_slots;
-@@ -6566,14 +6559,17 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
- dm_conn_state->vcpi_slots = slot_num;
-
- ret = drm_dp_mst_atomic_enable_dsc(state, aconnector->port,
-- dm_conn_state->pbn, false);
-+ dm_conn_state->pbn, 0, false);
- if (ret < 0)
- return ret;
-
- continue;
- }
-
-- vcpi = drm_dp_mst_atomic_enable_dsc(state, aconnector->port, pbn, true);
-+ vcpi = drm_dp_mst_atomic_enable_dsc(state,
-+ aconnector->port,
-+ pbn, pbn_div,
-+ true);
- if (vcpi < 0)
- return vcpi;
-
-@@ -9407,6 +9403,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
- struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
- #if defined(CONFIG_DRM_AMD_DC_DCN)
- struct dsc_mst_fairness_vars vars[MAX_PIPES];
-+ struct drm_dp_mst_topology_state *mst_state;
-+ struct drm_dp_mst_topology_mgr *mgr;
- #endif
-
- trace_amdgpu_dm_atomic_check_begin(state);
-@@ -9654,6 +9652,33 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
- lock_and_validation_needed = true;
- }
-
-+#if defined(CONFIG_DRM_AMD_DC_DCN)
-+ /* set the slot info for each mst_state based on the link encoding format */
-+ for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
-+ struct amdgpu_dm_connector *aconnector;
-+ struct drm_connector *connector;
-+ struct drm_connector_list_iter iter;
-+ u8 link_coding_cap;
-+
-+ if (!mgr->mst_state )
-+ continue;
-+
-+ drm_connector_list_iter_begin(dev, &iter);
-+ drm_for_each_connector_iter(connector, &iter) {
-+ int id = connector->index;
-+
-+ if (id == mst_state->mgr->conn_base_id) {
-+ aconnector = to_amdgpu_dm_connector(connector);
-+ link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
-+ drm_dp_mst_update_slots(mst_state, link_coding_cap);
-+
-+ break;
-+ }
-+ }
-+ drm_connector_list_iter_end(&iter);
-+
-+ }
-+#endif
- /**
- * Streams and planes are reset when there are changes that affect
- * bandwidth. Anything that affects bandwidth needs to go through
-diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
-index f72c013d3a5b..c8f9d10fde17 100644
---- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
-+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
-@@ -27,7 +27,6 @@
- #include <linux/acpi.h>
- #include <linux/i2c.h>
-
--#include <drm/drm_atomic.h>
- #include <drm/drm_probe_helper.h>
- #include <drm/amdgpu_drm.h>
- #include <drm/drm_edid.h>
-@@ -120,27 +119,40 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
- }
-
- static void
--fill_dc_mst_payload_table_from_drm(struct drm_dp_mst_topology_state *mst_state,
-- struct amdgpu_dm_connector *aconnector,
-- struct dc_dp_mst_stream_allocation_table *table)
-+fill_dc_mst_payload_table_from_drm(struct amdgpu_dm_connector *aconnector,
-+ struct dc_dp_mst_stream_allocation_table *proposed_table)
- {
-- struct dc_dp_mst_stream_allocation_table new_table = { 0 };
-- struct dc_dp_mst_stream_allocation *sa;
-- struct drm_dp_mst_atomic_payload *payload;
--
-- /* Fill payload info*/
-- list_for_each_entry(payload, &mst_state->payloads, next) {
-- if (payload->delete)
-- continue;
--
-- sa = &new_table.stream_allocations[new_table.stream_count];
-- sa->slot_count = payload->time_slots;
-- sa->vcp_id = payload->vcpi;
-- new_table.stream_count++;
-+ int i;
-+ struct drm_dp_mst_topology_mgr *mst_mgr =
-+ &aconnector->mst_port->mst_mgr;
-+
-+ mutex_lock(&mst_mgr->payload_lock);
-+
-+ proposed_table->stream_count = 0;
-+
-+ /* number of active streams */
-+ for (i = 0; i < mst_mgr->max_payloads; i++) {
-+ if (mst_mgr->payloads[i].num_slots == 0)
-+ break; /* end of vcp_id table */
-+
-+ ASSERT(mst_mgr->payloads[i].payload_state !=
-+ DP_PAYLOAD_DELETE_LOCAL);
-+
-+ if (mst_mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL ||
-+ mst_mgr->payloads[i].payload_state ==
-+ DP_PAYLOAD_REMOTE) {
-+
-+ struct dc_dp_mst_stream_allocation *sa =
-+ &proposed_table->stream_allocations[
-+ proposed_table->stream_count];
-+
-+ sa->slot_count = mst_mgr->payloads[i].num_slots;
-+ sa->vcp_id = mst_mgr->proposed_vcpis[i]->vcpi;
-+ proposed_table->stream_count++;
-+ }
- }
-
-- /* Overwrite the old table */
-- *table = new_table;
-+ mutex_unlock(&mst_mgr->payload_lock);
- }
-
- void dm_helpers_dp_update_branch_info(
-@@ -158,9 +170,11 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
- bool enable)
- {
- struct amdgpu_dm_connector *aconnector;
-- struct drm_dp_mst_topology_state *mst_state;
-- struct drm_dp_mst_atomic_payload *payload;
-+ struct dm_connector_state *dm_conn_state;
- struct drm_dp_mst_topology_mgr *mst_mgr;
-+ struct drm_dp_mst_port *mst_port;
-+ bool ret;
-+ u8 link_coding_cap = DP_8b_10b_ENCODING;
-
- aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
- /* Accessing the connector state is required for vcpi_slots allocation
-@@ -171,21 +185,40 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
- if (!aconnector || !aconnector->mst_port)
- return false;
-
-+ dm_conn_state = to_dm_connector_state(aconnector->base.state);
-+
- mst_mgr = &aconnector->mst_port->mst_mgr;
-- mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);
-+
-+ if (!mst_mgr->mst_state)
-+ return false;
-+
-+ mst_port = aconnector->port;
-+
-+#if defined(CONFIG_DRM_AMD_DC_DCN)
-+ link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
-+#endif
-+
-+ if (enable) {
-+
-+ ret = drm_dp_mst_allocate_vcpi(mst_mgr, mst_port,
-+ dm_conn_state->pbn,
-+ dm_conn_state->vcpi_slots);
-+ if (!ret)
-+ return false;
-+
-+ } else {
-+ drm_dp_mst_reset_vcpi_slots(mst_mgr, mst_port);
-+ }
-
- /* It's OK for this to fail */
-- payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->port);
-- if (enable)
-- drm_dp_add_payload_part1(mst_mgr, mst_state, payload);
-- else
-- drm_dp_remove_payload(mst_mgr, mst_state, payload);
-+ drm_dp_update_payload_part1(mst_mgr, (link_coding_cap == DP_CAP_ANSI_128B132B) ? 0:1);
-
- /* mst_mgr->->payloads are VC payload notify MST branch using DPCD or
- * AUX message. The sequence is slot 1-63 allocated sequence for each
- * stream. AMD ASIC stream slot allocation should follow the same
- * sequence. copy DRM MST allocation to dc */
-- fill_dc_mst_payload_table_from_drm(mst_state, aconnector, proposed_table);
-+
-+ fill_dc_mst_payload_table_from_drm(aconnector, proposed_table);
-
- return true;
- }
-@@ -242,9 +275,8 @@ bool dm_helpers_dp_mst_send_payload_allocation(
- bool enable)
- {
- struct amdgpu_dm_connector *aconnector;
-- struct drm_dp_mst_topology_state *mst_state;
- struct drm_dp_mst_topology_mgr *mst_mgr;
-- struct drm_dp_mst_atomic_payload *payload;
-+ struct drm_dp_mst_port *mst_port;
- enum mst_progress_status set_flag = MST_ALLOCATE_NEW_PAYLOAD;
- enum mst_progress_status clr_flag = MST_CLEAR_ALLOCATED_PAYLOAD;
-
-@@ -253,16 +285,19 @@ bool dm_helpers_dp_mst_send_payload_allocation(
- if (!aconnector || !aconnector->mst_port)
- return false;
-
-+ mst_port = aconnector->port;
-+
- mst_mgr = &aconnector->mst_port->mst_mgr;
-- mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);
-
-- payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->port);
-+ if (!mst_mgr->mst_state)
-+ return false;
-+
- if (!enable) {
- set_flag = MST_CLEAR_ALLOCATED_PAYLOAD;
- clr_flag = MST_ALLOCATE_NEW_PAYLOAD;
- }
-
-- if (enable && drm_dp_add_payload_part2(mst_mgr, mst_state->base.state, payload)) {
-+ if (drm_dp_update_payload_part2(mst_mgr)) {
- amdgpu_dm_set_mst_status(&aconnector->mst_status,
- set_flag, false);
- } else {
-@@ -272,6 +307,9 @@ bool dm_helpers_dp_mst_send_payload_allocation(
- clr_flag, false);
- }
-
-+ if (!enable)
-+ drm_dp_mst_deallocate_vcpi(mst_mgr, mst_port);
-+
- return true;
- }
-
-diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
-index 6483ba266893..d57f1528a295 100644
---- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
-+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
-@@ -598,8 +598,15 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
-
- dc_link_dp_get_max_link_enc_cap(aconnector->dc_link, &max_link_enc_cap);
- aconnector->mst_mgr.cbs = &dm_mst_cbs;
-- drm_dp_mst_topology_mgr_init(&aconnector->mst_mgr, adev_to_drm(dm->adev),
-- &aconnector->dm_dp_aux.aux, 16, 4, aconnector->connector_id);
-+ drm_dp_mst_topology_mgr_init(
-+ &aconnector->mst_mgr,
-+ adev_to_drm(dm->adev),
-+ &aconnector->dm_dp_aux.aux,
-+ 16,
-+ 4,
-+ max_link_enc_cap.lane_count,
-+ drm_dp_bw_code_to_link_rate(max_link_enc_cap.link_rate),
-+ aconnector->connector_id);
-
- drm_connector_attach_dp_subconnector_property(&aconnector->base);
- }
-@@ -703,13 +710,12 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
- return dsc_config.bits_per_pixel;
- }
-
--static int increase_dsc_bpp(struct drm_atomic_state *state,
-- struct drm_dp_mst_topology_state *mst_state,
-- struct dc_link *dc_link,
-- struct dsc_mst_fairness_params *params,
-- struct dsc_mst_fairness_vars *vars,
-- int count,
-- int k)
-+static bool increase_dsc_bpp(struct drm_atomic_state *state,
-+ struct dc_link *dc_link,
-+ struct dsc_mst_fairness_params *params,
-+ struct dsc_mst_fairness_vars *vars,
-+ int count,
-+ int k)
- {
- int i;
- bool bpp_increased[MAX_PIPES];
-@@ -717,10 +723,13 @@ static int increase_dsc_bpp(struct drm_atomic_state *state,
- int min_initial_slack;
- int next_index;
- int remaining_to_increase = 0;
-+ int pbn_per_timeslot;
- int link_timeslots_used;
- int fair_pbn_alloc;
- int ret = 0;
-
-+ pbn_per_timeslot = dm_mst_get_pbn_divider(dc_link);
-+
- for (i = 0; i < count; i++) {
- if (vars[i + k].dsc_enabled) {
- initial_slack[i] =
-@@ -751,17 +760,18 @@ static int increase_dsc_bpp(struct drm_atomic_state *state,
- link_timeslots_used = 0;
-
- for (i = 0; i < count; i++)
-- link_timeslots_used += DIV_ROUND_UP(vars[i + k].pbn, mst_state->pbn_div);
-+ link_timeslots_used += DIV_ROUND_UP(vars[i + k].pbn, pbn_per_timeslot);
-
-- fair_pbn_alloc =
-- (63 - link_timeslots_used) / remaining_to_increase * mst_state->pbn_div;
-+ fair_pbn_alloc = (63 - link_timeslots_used) / remaining_to_increase * pbn_per_timeslot;
-
- if (initial_slack[next_index] > fair_pbn_alloc) {
- vars[next_index].pbn += fair_pbn_alloc;
-+
- ret = drm_dp_atomic_find_time_slots(state,
- params[next_index].port->mgr,
- params[next_index].port,
-- vars[next_index].pbn);
-+ vars[next_index].pbn,
-+ pbn_per_timeslot);
- if (ret < 0)
- return ret;
-
-@@ -773,7 +783,8 @@ static int increase_dsc_bpp(struct drm_atomic_state *state,
- ret = drm_dp_atomic_find_time_slots(state,
- params[next_index].port->mgr,
- params[next_index].port,
-- vars[next_index].pbn);
-+ vars[next_index].pbn,
-+ pbn_per_timeslot);
- if (ret < 0)
- return ret;
- }
-@@ -782,7 +793,8 @@ static int increase_dsc_bpp(struct drm_atomic_state *state,
- ret = drm_dp_atomic_find_time_slots(state,
- params[next_index].port->mgr,
- params[next_index].port,
-- vars[next_index].pbn);
-+ vars[next_index].pbn,
-+ pbn_per_timeslot);
- if (ret < 0)
- return ret;
-
-@@ -794,7 +806,8 @@ static int increase_dsc_bpp(struct drm_atomic_state *state,
- ret = drm_dp_atomic_find_time_slots(state,
- params[next_index].port->mgr,
- params[next_index].port,
-- vars[next_index].pbn);
-+ vars[next_index].pbn,
-+ pbn_per_timeslot);
- if (ret < 0)
- return ret;
- }
-@@ -850,10 +863,12 @@ static int try_disable_dsc(struct drm_atomic_state *state,
- break;
-
- vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps);
-+
- ret = drm_dp_atomic_find_time_slots(state,
- params[next_index].port->mgr,
- params[next_index].port,
-- vars[next_index].pbn);
-+ vars[next_index].pbn,
-+ dm_mst_get_pbn_divider(dc_link));
- if (ret < 0)
- return ret;
-
-@@ -863,10 +878,12 @@ static int try_disable_dsc(struct drm_atomic_state *state,
- vars[next_index].bpp_x16 = 0;
- } else {
- vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps);
-+
- ret = drm_dp_atomic_find_time_slots(state,
- params[next_index].port->mgr,
- params[next_index].port,
-- vars[next_index].pbn);
-+ vars[next_index].pbn,
-+ dm_mst_get_pbn_divider(dc_link));
- if (ret < 0)
- return ret;
- }
-@@ -877,31 +894,21 @@ static int try_disable_dsc(struct drm_atomic_state *state,
- return 0;
- }
-
--static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
-- struct dc_state *dc_state,
-- struct dc_link *dc_link,
-- struct dsc_mst_fairness_vars *vars,
-- struct drm_dp_mst_topology_mgr *mgr,
-- int *link_vars_start_index)
-+static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
-+ struct dc_state *dc_state,
-+ struct dc_link *dc_link,
-+ struct dsc_mst_fairness_vars *vars,
-+ int *link_vars_start_index)
- {
-+ int i, k, ret;
- struct dc_stream_state *stream;
- struct dsc_mst_fairness_params params[MAX_PIPES];
- struct amdgpu_dm_connector *aconnector;
-- struct drm_dp_mst_topology_state *mst_state = drm_atomic_get_mst_topology_state(state, mgr);
- int count = 0;
-- int i, k, ret;
- bool debugfs_overwrite = false;
-
- memset(params, 0, sizeof(params));
-
-- if (IS_ERR(mst_state))
-- return PTR_ERR(mst_state);
--
-- mst_state->pbn_div = dm_mst_get_pbn_divider(dc_link);
--#if defined(CONFIG_DRM_AMD_DC_DCN)
-- drm_dp_mst_update_slots(mst_state, dc_link_dp_mst_decide_link_encoding_format(dc_link));
--#endif
--
- /* Set up params */
- for (i = 0; i < dc_state->stream_count; i++) {
- struct dc_dsc_policy dsc_policy = {0};
-@@ -961,7 +968,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
- vars[i + k].dsc_enabled = false;
- vars[i + k].bpp_x16 = 0;
- ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port,
-- vars[i + k].pbn);
-+ vars[i + k].pbn, dm_mst_get_pbn_divider(dc_link));
- if (ret < 0)
- return ret;
- }
-@@ -980,7 +987,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
- vars[i + k].dsc_enabled = true;
- vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
- ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
-- params[i].port, vars[i + k].pbn);
-+ params[i].port, vars[i + k].pbn, dm_mst_get_pbn_divider(dc_link));
- if (ret < 0)
- return ret;
- } else {
-@@ -988,7 +995,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
- vars[i + k].dsc_enabled = false;
- vars[i + k].bpp_x16 = 0;
- ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
-- params[i].port, vars[i + k].pbn);
-+ params[i].port, vars[i + k].pbn, dm_mst_get_pbn_divider(dc_link));
- if (ret < 0)
- return ret;
- }
-@@ -998,7 +1005,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
- return ret;
-
- /* Optimize degree of compression */
-- ret = increase_dsc_bpp(state, mst_state, dc_link, params, vars, count, k);
-+ ret = increase_dsc_bpp(state, dc_link, params, vars, count, k);
- if (ret < 0)
- return ret;
-
-@@ -1148,7 +1155,7 @@ int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
- continue;
-
- mst_mgr = aconnector->port->mgr;
-- ret = compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars, mst_mgr,
-+ ret = compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars,
- &link_vars_start_index);
- if (ret != 0)
- return ret;
-@@ -1206,7 +1213,7 @@ static int pre_compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
- continue;
-
- mst_mgr = aconnector->port->mgr;
-- ret = compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars, mst_mgr,
-+ ret = compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars,
- &link_vars_start_index);
- if (ret != 0)
- return ret;
-diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h
-index d1e91d31d151..0889c2a86733 100644
---- a/drivers/gpu/drm/amd/display/include/link_service_types.h
-+++ b/drivers/gpu/drm/amd/display/include/link_service_types.h
-@@ -252,9 +252,6 @@ union dpcd_training_lane_set {
- * _ONLY_ be filled out from DM and then passed to DC, do NOT use these for _any_ kind of atomic
- * state calculations in DM, or you will break something.
- */
--
--struct drm_dp_mst_port;
--
- /* DP MST stream allocation (payload bandwidth number) */
- struct dc_dp_mst_stream_allocation {
- uint8_t vcp_id;
-diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
-index 51a46689cda7..95ff57d20216 100644
---- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
-+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
-@@ -68,7 +68,8 @@ static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
- static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);
-
- static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
-- int id, u8 start_slot, u8 num_slots);
-+ int id,
-+ struct drm_dp_payload *payload);
-
- static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_port *port,
-@@ -1234,6 +1235,57 @@ build_query_stream_enc_status(struct drm_dp_sideband_msg_tx *msg, u8 stream_id,
- return 0;
- }
-
-+static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
-+ struct drm_dp_vcpi *vcpi)
-+{
-+ int ret, vcpi_ret;
-+
-+ mutex_lock(&mgr->payload_lock);
-+ ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
-+ if (ret > mgr->max_payloads) {
-+ ret = -EINVAL;
-+ drm_dbg_kms(mgr->dev, "out of payload ids %d\n", ret);
-+ goto out_unlock;
-+ }
-+
-+ vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
-+ if (vcpi_ret > mgr->max_payloads) {
-+ ret = -EINVAL;
-+ drm_dbg_kms(mgr->dev, "out of vcpi ids %d\n", ret);
-+ goto out_unlock;
-+ }
-+
-+ set_bit(ret, &mgr->payload_mask);
-+ set_bit(vcpi_ret, &mgr->vcpi_mask);
-+ vcpi->vcpi = vcpi_ret + 1;
-+ mgr->proposed_vcpis[ret - 1] = vcpi;
-+out_unlock:
-+ mutex_unlock(&mgr->payload_lock);
-+ return ret;
-+}
-+
-+static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
-+ int vcpi)
-+{
-+ int i;
-+
-+ if (vcpi == 0)
-+ return;
-+
-+ mutex_lock(&mgr->payload_lock);
-+ drm_dbg_kms(mgr->dev, "putting payload %d\n", vcpi);
-+ clear_bit(vcpi - 1, &mgr->vcpi_mask);
-+
-+ for (i = 0; i < mgr->max_payloads; i++) {
-+ if (mgr->proposed_vcpis[i] &&
-+ mgr->proposed_vcpis[i]->vcpi == vcpi) {
-+ mgr->proposed_vcpis[i] = NULL;
-+ clear_bit(i + 1, &mgr->payload_mask);
-+ }
-+ }
-+ mutex_unlock(&mgr->payload_lock);
-+}
-+
- static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_sideband_msg_tx *txmsg)
- {
-@@ -1686,7 +1738,7 @@ drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port) {}
- #define save_port_topology_ref(port, type)
- #endif
-
--struct drm_dp_mst_atomic_payload *
-+static struct drm_dp_mst_atomic_payload *
- drm_atomic_get_mst_payload_state(struct drm_dp_mst_topology_state *state,
- struct drm_dp_mst_port *port)
- {
-@@ -1698,7 +1750,6 @@ drm_atomic_get_mst_payload_state(struct drm_dp_mst_topology_state *state,
-
- return NULL;
- }
--EXPORT_SYMBOL(drm_atomic_get_mst_payload_state);
-
- static void drm_dp_destroy_mst_branch_device(struct kref *kref)
- {
-@@ -3201,8 +3252,6 @@ int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_port *port,
- struct drm_dp_query_stream_enc_status_ack_reply *status)
- {
-- struct drm_dp_mst_topology_state *state;
-- struct drm_dp_mst_atomic_payload *payload;
- struct drm_dp_sideband_msg_tx *txmsg;
- u8 nonce[7];
- int ret;
-@@ -3219,10 +3268,6 @@ int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
-
- get_random_bytes(nonce, sizeof(nonce));
-
-- drm_modeset_lock(&mgr->base.lock, NULL);
-- state = to_drm_dp_mst_topology_state(mgr->base.state);
-- payload = drm_atomic_get_mst_payload_state(state, port);
--
- /*
- * "Source device targets the QUERY_STREAM_ENCRYPTION_STATUS message
- * transaction at the MST Branch device directly connected to the
-@@ -3230,7 +3275,7 @@ int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
- */
- txmsg->dst = mgr->mst_primary;
-
-- build_query_stream_enc_status(txmsg, payload->vcpi, nonce);
-+ build_query_stream_enc_status(txmsg, port->vcpi.vcpi, nonce);
-
- drm_dp_queue_down_tx(mgr, txmsg);
-
-@@ -3247,7 +3292,6 @@ int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
- memcpy(status, &txmsg->reply.u.enc_status, sizeof(*status));
-
- out:
-- drm_modeset_unlock(&mgr->base.lock);
- drm_dp_mst_topology_put_port(port);
- out_get_port:
- kfree(txmsg);
-@@ -3256,162 +3300,238 @@ int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
- EXPORT_SYMBOL(drm_dp_send_query_stream_enc_status);
-
- static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
-- struct drm_dp_mst_atomic_payload *payload)
-+ int id,
-+ struct drm_dp_payload *payload)
- {
-- return drm_dp_dpcd_write_payload(mgr, payload->vcpi, payload->vc_start_slot,
-- payload->time_slots);
-+ int ret;
-+
-+ ret = drm_dp_dpcd_write_payload(mgr, id, payload);
-+ if (ret < 0) {
-+ payload->payload_state = 0;
-+ return ret;
-+ }
-+ payload->payload_state = DP_PAYLOAD_LOCAL;
-+ return 0;
- }
-
- static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
-- struct drm_dp_mst_atomic_payload *payload)
-+ struct drm_dp_mst_port *port,
-+ int id,
-+ struct drm_dp_payload *payload)
- {
- int ret;
-- struct drm_dp_mst_port *port = drm_dp_mst_topology_get_port_validated(mgr, payload->port);
--
-- if (!port)
-- return -EIO;
-
-- ret = drm_dp_payload_send_msg(mgr, port, payload->vcpi, payload->pbn);
-- drm_dp_mst_topology_put_port(port);
-+ ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
-+ if (ret < 0)
-+ return ret;
-+ payload->payload_state = DP_PAYLOAD_REMOTE;
- return ret;
- }
-
- static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
-- struct drm_dp_mst_topology_state *mst_state,
-- struct drm_dp_mst_atomic_payload *payload)
-+ struct drm_dp_mst_port *port,
-+ int id,
-+ struct drm_dp_payload *payload)
- {
- drm_dbg_kms(mgr->dev, "\n");
--
- /* it's okay for these to fail */
-- drm_dp_payload_send_msg(mgr, payload->port, payload->vcpi, 0);
-- drm_dp_dpcd_write_payload(mgr, payload->vcpi, payload->vc_start_slot, 0);
-+ if (port) {
-+ drm_dp_payload_send_msg(mgr, port, id, 0);
-+ }
-
-+ drm_dp_dpcd_write_payload(mgr, id, payload);
-+ payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;
-+ return 0;
-+}
-+
-+static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
-+ int id,
-+ struct drm_dp_payload *payload)
-+{
-+ payload->payload_state = 0;
- return 0;
- }
-
- /**
-- * drm_dp_add_payload_part1() - Execute payload update part 1
-- * @mgr: Manager to use.
-- * @mst_state: The MST atomic state
-- * @payload: The payload to write
-+ * drm_dp_update_payload_part1() - Execute payload update part 1
-+ * @mgr: manager to use.
-+ * @start_slot: this is the cur slot
-+ *
-+ * NOTE: start_slot is a temporary workaround for non-atomic drivers,
-+ * this will be removed when non-atomic mst helpers are moved out of the helper
- *
-- * Determines the starting time slot for the given payload, and programs the VCPI for this payload
-- * into hardware. After calling this, the driver should generate ACT and payload packets.
-+ * This iterates over all proposed virtual channels, and tries to
-+ * allocate space in the link for them. For 0->slots transitions,
-+ * this step just writes the VCPI to the MST device. For slots->0
-+ * transitions, this writes the updated VCPIs and removes the
-+ * remote VC payloads.
- *
-- * Returns: 0 on success, error code on failure. In the event that this fails,
-- * @payload.vc_start_slot will also be set to -1.
-+ * after calling this the driver should generate ACT and payload
-+ * packets.
- */
--int drm_dp_add_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
-- struct drm_dp_mst_topology_state *mst_state,
-- struct drm_dp_mst_atomic_payload *payload)
-+int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr, int start_slot)
- {
-+ struct drm_dp_payload req_payload;
- struct drm_dp_mst_port *port;
-- int ret;
-+ int i, j;
-+ int cur_slots = start_slot;
-+ bool skip;
-
-- port = drm_dp_mst_topology_get_port_validated(mgr, payload->port);
-- if (!port)
-- return 0;
-+ mutex_lock(&mgr->payload_lock);
-+ for (i = 0; i < mgr->max_payloads; i++) {
-+ struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
-+ struct drm_dp_payload *payload = &mgr->payloads[i];
-+ bool put_port = false;
-
-- if (mgr->payload_count == 0)
-- mgr->next_start_slot = mst_state->start_slot;
-+ /* solve the current payloads - compare to the hw ones
-+ - update the hw view */
-+ req_payload.start_slot = cur_slots;
-+ if (vcpi) {
-+ port = container_of(vcpi, struct drm_dp_mst_port,
-+ vcpi);
-
-- payload->vc_start_slot = mgr->next_start_slot;
-+ mutex_lock(&mgr->lock);
-+ skip = !drm_dp_mst_port_downstream_of_branch(port, mgr->mst_primary);
-+ mutex_unlock(&mgr->lock);
-
-- ret = drm_dp_create_payload_step1(mgr, payload);
-- drm_dp_mst_topology_put_port(port);
-- if (ret < 0) {
-- drm_warn(mgr->dev, "Failed to create MST payload for port %p: %d\n",
-- payload->port, ret);
-- payload->vc_start_slot = -1;
-- return ret;
-- }
-+ if (skip) {
-+ drm_dbg_kms(mgr->dev,
-+ "Virtual channel %d is not in current topology\n",
-+ i);
-+ continue;
-+ }
-+ /* Validated ports don't matter if we're releasing
-+ * VCPI
-+ */
-+ if (vcpi->num_slots) {
-+ port = drm_dp_mst_topology_get_port_validated(
-+ mgr, port);
-+ if (!port) {
-+ if (vcpi->num_slots == payload->num_slots) {
-+ cur_slots += vcpi->num_slots;
-+ payload->start_slot = req_payload.start_slot;
-+ continue;
-+ } else {
-+ drm_dbg_kms(mgr->dev,
-+ "Fail:set payload to invalid sink");
-+ mutex_unlock(&mgr->payload_lock);
-+ return -EINVAL;
-+ }
-+ }
-+ put_port = true;
-+ }
-
-- mgr->payload_count++;
-- mgr->next_start_slot += payload->time_slots;
-+ req_payload.num_slots = vcpi->num_slots;
-+ req_payload.vcpi = vcpi->vcpi;
-+ } else {
-+ port = NULL;
-+ req_payload.num_slots = 0;
-+ }
-
-- return 0;
--}
--EXPORT_SYMBOL(drm_dp_add_payload_part1);
-+ payload->start_slot = req_payload.start_slot;
-+ /* work out what is required to happen with this payload */
-+ if (payload->num_slots != req_payload.num_slots) {
-+
-+ /* need to push an update for this payload */
-+ if (req_payload.num_slots) {
-+ drm_dp_create_payload_step1(mgr, vcpi->vcpi,
-+ &req_payload);
-+ payload->num_slots = req_payload.num_slots;
-+ payload->vcpi = req_payload.vcpi;
-+
-+ } else if (payload->num_slots) {
-+ payload->num_slots = 0;
-+ drm_dp_destroy_payload_step1(mgr, port,
-+ payload->vcpi,
-+ payload);
-+ req_payload.payload_state =
-+ payload->payload_state;
-+ payload->start_slot = 0;
-+ }
-+ payload->payload_state = req_payload.payload_state;
-+ }
-+ cur_slots += req_payload.num_slots;
-
--/**
-- * drm_dp_remove_payload() - Remove an MST payload
-- * @mgr: Manager to use.
-- * @mst_state: The MST atomic state
-- * @payload: The payload to write
-- *
-- * Removes a payload from an MST topology if it was successfully assigned a start slot. Also updates
-- * the starting time slots of all other payloads which would have been shifted towards the start of
-- * the VC table as a result. After calling this, the driver should generate ACT and payload packets.
-- */
--void drm_dp_remove_payload(struct drm_dp_mst_topology_mgr *mgr,
-- struct drm_dp_mst_topology_state *mst_state,
-- struct drm_dp_mst_atomic_payload *payload)
--{
-- struct drm_dp_mst_atomic_payload *pos;
-- bool send_remove = false;
-+ if (put_port)
-+ drm_dp_mst_topology_put_port(port);
-+ }
-
-- /* We failed to make the payload, so nothing to do */
-- if (payload->vc_start_slot == -1)
-- return;
-+ for (i = 0; i < mgr->max_payloads; /* do nothing */) {
-+ if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL) {
-+ i++;
-+ continue;
-+ }
-
-- mutex_lock(&mgr->lock);
-- send_remove = drm_dp_mst_port_downstream_of_branch(payload->port, mgr->mst_primary);
-- mutex_unlock(&mgr->lock);
-+ drm_dbg_kms(mgr->dev, "removing payload %d\n", i);
-+ for (j = i; j < mgr->max_payloads - 1; j++) {
-+ mgr->payloads[j] = mgr->payloads[j + 1];
-+ mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
-
-- if (send_remove)
-- drm_dp_destroy_payload_step1(mgr, mst_state, payload);
-- else
-- drm_dbg_kms(mgr->dev, "Payload for VCPI %d not in topology, not sending remove\n",
-- payload->vcpi);
-+ if (mgr->proposed_vcpis[j] &&
-+ mgr->proposed_vcpis[j]->num_slots) {
-+ set_bit(j + 1, &mgr->payload_mask);
-+ } else {
-+ clear_bit(j + 1, &mgr->payload_mask);
-+ }
-+ }
-
-- list_for_each_entry(pos, &mst_state->payloads, next) {
-- if (pos != payload && pos->vc_start_slot > payload->vc_start_slot)
-- pos->vc_start_slot -= payload->time_slots;
-+ memset(&mgr->payloads[mgr->max_payloads - 1], 0,
-+ sizeof(struct drm_dp_payload));
-+ mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
-+ clear_bit(mgr->max_payloads, &mgr->payload_mask);
- }
-- payload->vc_start_slot = -1;
-+ mutex_unlock(&mgr->payload_lock);
-
-- mgr->payload_count--;
-- mgr->next_start_slot -= payload->time_slots;
-+ return 0;
- }
--EXPORT_SYMBOL(drm_dp_remove_payload);
-+EXPORT_SYMBOL(drm_dp_update_payload_part1);
-
- /**
-- * drm_dp_add_payload_part2() - Execute payload update part 2
-- * @mgr: Manager to use.
-- * @state: The global atomic state
-- * @payload: The payload to update
-- *
-- * If @payload was successfully assigned a starting time slot by drm_dp_add_payload_part1(), this
-- * function will send the sideband messages to finish allocating this payload.
-+ * drm_dp_update_payload_part2() - Execute payload update part 2
-+ * @mgr: manager to use.
- *
-- * Returns: 0 on success, negative error code on failure.
-+ * This iterates over all proposed virtual channels, and tries to
-+ * allocate space in the link for them. For 0->slots transitions,
-+ * this step writes the remote VC payload commands. For slots->0
-+ * this just resets some internal state.
- */
--int drm_dp_add_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
-- struct drm_atomic_state *state,
-- struct drm_dp_mst_atomic_payload *payload)
-+int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
- {
-+ struct drm_dp_mst_port *port;
-+ int i;
- int ret = 0;
-+ bool skip;
-
-- /* Skip failed payloads */
-- if (payload->vc_start_slot == -1) {
-- drm_dbg_kms(state->dev, "Part 1 of payload creation for %s failed, skipping part 2\n",
-- payload->port->connector->name);
-- return -EIO;
-- }
-+ mutex_lock(&mgr->payload_lock);
-+ for (i = 0; i < mgr->max_payloads; i++) {
-
-- ret = drm_dp_create_payload_step2(mgr, payload);
-- if (ret < 0) {
-- if (!payload->delete)
-- drm_err(mgr->dev, "Step 2 of creating MST payload for %p failed: %d\n",
-- payload->port, ret);
-- else
-- drm_dbg_kms(mgr->dev, "Step 2 of removing MST payload for %p failed: %d\n",
-- payload->port, ret);
-- }
-+ if (!mgr->proposed_vcpis[i])
-+ continue;
-
-- return ret;
-+ port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
-+
-+ mutex_lock(&mgr->lock);
-+ skip = !drm_dp_mst_port_downstream_of_branch(port, mgr->mst_primary);
-+ mutex_unlock(&mgr->lock);
-+
-+ if (skip)
-+ continue;
-+
-+ drm_dbg_kms(mgr->dev, "payload %d %d\n", i, mgr->payloads[i].payload_state);
-+ if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
-+ ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
-+ } else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
-+ ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
-+ }
-+ if (ret) {
-+ mutex_unlock(&mgr->payload_lock);
-+ return ret;
-+ }
-+ }
-+ mutex_unlock(&mgr->payload_lock);
-+ return 0;
- }
--EXPORT_SYMBOL(drm_dp_add_payload_part2);
-+EXPORT_SYMBOL(drm_dp_update_payload_part2);
-
- static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_port *port,
-@@ -3591,6 +3711,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
- int ret = 0;
- struct drm_dp_mst_branch *mstb = NULL;
-
-+ mutex_lock(&mgr->payload_lock);
- mutex_lock(&mgr->lock);
- if (mst_state == mgr->mst_state)
- goto out_unlock;
-@@ -3598,6 +3719,10 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
- mgr->mst_state = mst_state;
- /* set the device into MST mode */
- if (mst_state) {
-+ struct drm_dp_payload reset_pay;
-+ int lane_count;
-+ int link_rate;
-+
- WARN_ON(mgr->mst_primary);
-
- /* get dpcd info */
-@@ -3608,6 +3733,16 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
- goto out_unlock;
- }
-
-+ lane_count = min_t(int, mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK, mgr->max_lane_count);
-+ link_rate = min_t(int, drm_dp_bw_code_to_link_rate(mgr->dpcd[1]), mgr->max_link_rate);
-+ mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr,
-+ link_rate,
-+ lane_count);
-+ if (mgr->pbn_div == 0) {
-+ ret = -EINVAL;
-+ goto out_unlock;
-+ }
-+
- /* add initial branch device at LCT 1 */
- mstb = drm_dp_add_mst_branch_device(1, NULL);
- if (mstb == NULL) {
-@@ -3627,8 +3762,9 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
- if (ret < 0)
- goto out_unlock;
-
-- /* Write reset payload */
-- drm_dp_dpcd_write_payload(mgr, 0, 0, 0x3f);
-+ reset_pay.start_slot = 0;
-+ reset_pay.num_slots = 0x3f;
-+ drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
-
- queue_work(system_long_wq, &mgr->work);
-
-@@ -3640,11 +3776,19 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
- /* this can fail if the device is gone */
- drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
- ret = 0;
-+ memset(mgr->payloads, 0,
-+ mgr->max_payloads * sizeof(mgr->payloads[0]));
-+ memset(mgr->proposed_vcpis, 0,
-+ mgr->max_payloads * sizeof(mgr->proposed_vcpis[0]));
-+ mgr->payload_mask = 0;
-+ set_bit(0, &mgr->payload_mask);
-+ mgr->vcpi_mask = 0;
- mgr->payload_id_table_cleared = false;
- }
-
- out_unlock:
- mutex_unlock(&mgr->lock);
-+ mutex_unlock(&mgr->payload_lock);
- if (mstb)
- drm_dp_mst_topology_put_mstb(mstb);
- return ret;
-@@ -4163,18 +4307,62 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_
- }
- EXPORT_SYMBOL(drm_dp_mst_get_edid);
-
-+/**
-+ * drm_dp_find_vcpi_slots() - Find time slots for this PBN value
-+ * @mgr: manager to use
-+ * @pbn: payload bandwidth to convert into slots.
-+ *
-+ * Calculate the number of time slots that will be required for the given PBN
-+ * value. This function is deprecated, and should not be used in atomic
-+ * drivers.
-+ *
-+ * RETURNS:
-+ * The total slots required for this port, or error.
-+ */
-+int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
-+ int pbn)
-+{
-+ int num_slots;
-+
-+ num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
-+
-+ /* max. time slots - one slot for MTP header */
-+ if (num_slots > 63)
-+ return -ENOSPC;
-+ return num_slots;
-+}
-+EXPORT_SYMBOL(drm_dp_find_vcpi_slots);
-+
-+static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
-+ struct drm_dp_vcpi *vcpi, int pbn, int slots)
-+{
-+ int ret;
-+
-+ vcpi->pbn = pbn;
-+ vcpi->aligned_pbn = slots * mgr->pbn_div;
-+ vcpi->num_slots = slots;
-+
-+ ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
-+ if (ret < 0)
-+ return ret;
-+ return 0;
-+}
-+
- /**
- * drm_dp_atomic_find_time_slots() - Find and add time slots to the state
- * @state: global atomic state
- * @mgr: MST topology manager for the port
- * @port: port to find time slots for
- * @pbn: bandwidth required for the mode in PBN
-+ * @pbn_div: divider for DSC mode that takes FEC into account
- *
-- * Allocates time slots to @port, replacing any previous time slot allocations it may
-- * have had. Any atomic drivers which support MST must call this function in
-- * their &drm_encoder_helper_funcs.atomic_check() callback unconditionally to
-- * change the current time slot allocation for the new state, and ensure the MST
-- * atomic state is added whenever the state of payloads in the topology changes.
-+ * Allocates time slots to @port, replacing any previous timeslot allocations it
-+ * may have had. Any atomic drivers which support MST must call this function
-+ * in their &drm_encoder_helper_funcs.atomic_check() callback to change the
-+ * current timeslot allocation for the new state, but only when
-+ * &drm_crtc_state.mode_changed or &drm_crtc_state.connectors_changed is set
-+ * to ensure compatibility with userspace applications that still use the
-+ * legacy modesetting UAPI.
- *
- * Allocations set by this function are not checked against the bandwidth
- * restraints of @mgr until the driver calls drm_dp_mst_atomic_check().
-@@ -4193,7 +4381,8 @@ EXPORT_SYMBOL(drm_dp_mst_get_edid);
- */
- int drm_dp_atomic_find_time_slots(struct drm_atomic_state *state,
- struct drm_dp_mst_topology_mgr *mgr,
-- struct drm_dp_mst_port *port, int pbn)
-+ struct drm_dp_mst_port *port, int pbn,
-+ int pbn_div)
- {
- struct drm_dp_mst_topology_state *topology_state;
- struct drm_dp_mst_atomic_payload *payload = NULL;
-@@ -4226,7 +4415,10 @@ int drm_dp_atomic_find_time_slots(struct drm_atomic_state *state,
- }
- }
-
-- req_slots = DIV_ROUND_UP(pbn, topology_state->pbn_div);
-+ if (pbn_div <= 0)
-+ pbn_div = mgr->pbn_div;
-+
-+ req_slots = DIV_ROUND_UP(pbn, pbn_div);
-
- drm_dbg_atomic(mgr->dev, "[CONNECTOR:%d:%s] [MST PORT:%p] TU %d -> %d\n",
- port->connector->base.id, port->connector->name,
-@@ -4235,7 +4427,7 @@ int drm_dp_atomic_find_time_slots(struct drm_atomic_state *state,
- port->connector->base.id, port->connector->name,
- port, prev_bw, pbn);
-
-- /* Add the new allocation to the state, note the VCPI isn't assigned until the end */
-+ /* Add the new allocation to the state */
- if (!payload) {
- payload = kzalloc(sizeof(*payload), GFP_KERNEL);
- if (!payload)
-@@ -4243,7 +4435,6 @@ int drm_dp_atomic_find_time_slots(struct drm_atomic_state *state,
-
- drm_dp_mst_get_port_malloc(port);
- payload->port = port;
-- payload->vc_start_slot = -1;
- list_add(&payload->next, &topology_state->payloads);
- }
- payload->time_slots = req_slots;
-@@ -4260,12 +4451,10 @@ EXPORT_SYMBOL(drm_dp_atomic_find_time_slots);
- * @port: The port to release the time slots from
- *
- * Releases any time slots that have been allocated to a port in the atomic
-- * state. Any atomic drivers which support MST must call this function
-- * unconditionally in their &drm_connector_helper_funcs.atomic_check() callback.
-- * This helper will check whether time slots would be released by the new state and
-- * respond accordingly, along with ensuring the MST state is always added to the
-- * atomic state whenever a new state would modify the state of payloads on the
-- * topology.
-+ * state. Any atomic drivers which support MST must call this function in
-+ * their &drm_connector_helper_funcs.atomic_check() callback when the
-+ * connector will no longer have VCPI allocated (e.g. because its CRTC was
-+ * removed) when it had VCPI allocated in the previous atomic state.
- *
- * It is OK to call this even if @port has been removed from the system.
- * Additionally, it is OK to call this function multiple times on the same
-@@ -4330,7 +4519,6 @@ int drm_dp_atomic_release_time_slots(struct drm_atomic_state *state,
- drm_dp_mst_put_port_malloc(port);
- payload->pbn = 0;
- payload->delete = true;
-- topology_state->payload_mask &= ~BIT(payload->vcpi - 1);
- }
-
- return 0;
-@@ -4381,8 +4569,7 @@ int drm_dp_mst_atomic_setup_commit(struct drm_atomic_state *state)
- EXPORT_SYMBOL(drm_dp_mst_atomic_setup_commit);
-
- /**
-- * drm_dp_mst_atomic_wait_for_dependencies() - Wait for all pending commits on MST topologies,
-- * prepare new MST state for commit
-+ * drm_dp_mst_atomic_wait_for_dependencies() - Wait for all pending commits on MST topologies
- * @state: global atomic state
- *
- * Goes through any MST topologies in this atomic state, and waits for any pending commits which
-@@ -4400,30 +4587,17 @@ EXPORT_SYMBOL(drm_dp_mst_atomic_setup_commit);
- */
- void drm_dp_mst_atomic_wait_for_dependencies(struct drm_atomic_state *state)
- {
-- struct drm_dp_mst_topology_state *old_mst_state, *new_mst_state;
-+ struct drm_dp_mst_topology_state *old_mst_state;
- struct drm_dp_mst_topology_mgr *mgr;
-- struct drm_dp_mst_atomic_payload *old_payload, *new_payload;
- int i, j, ret;
-
-- for_each_oldnew_mst_mgr_in_state(state, mgr, old_mst_state, new_mst_state, i) {
-+ for_each_old_mst_mgr_in_state(state, mgr, old_mst_state, i) {
- for (j = 0; j < old_mst_state->num_commit_deps; j++) {
- ret = drm_crtc_commit_wait(old_mst_state->commit_deps[j]);
- if (ret < 0)
- drm_err(state->dev, "Failed to wait for %s: %d\n",
- old_mst_state->commit_deps[j]->crtc->name, ret);
- }
--
-- /* Now that previous state is committed, it's safe to copy over the start slot
-- * assignments
-- */
-- list_for_each_entry(old_payload, &old_mst_state->payloads, next) {
-- if (old_payload->delete)
-- continue;
--
-- new_payload = drm_atomic_get_mst_payload_state(new_mst_state,
-- old_payload->port);
-- new_payload->vc_start_slot = old_payload->vc_start_slot;
-- }
- }
- }
- EXPORT_SYMBOL(drm_dp_mst_atomic_wait_for_dependencies);
-@@ -4508,8 +4682,119 @@ void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_
- }
- EXPORT_SYMBOL(drm_dp_mst_update_slots);
-
-+/**
-+ * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
-+ * @mgr: manager for this port
-+ * @port: port to allocate a virtual channel for.
-+ * @pbn: payload bandwidth number to request
-+ * @slots: returned number of slots for this PBN.
-+ */
-+bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
-+ struct drm_dp_mst_port *port, int pbn, int slots)
-+{
-+ int ret;
-+
-+ if (slots < 0)
-+ return false;
-+
-+ port = drm_dp_mst_topology_get_port_validated(mgr, port);
-+ if (!port)
-+ return false;
-+
-+ if (port->vcpi.vcpi > 0) {
-+ drm_dbg_kms(mgr->dev,
-+ "payload: vcpi %d already allocated for pbn %d - requested pbn %d\n",
-+ port->vcpi.vcpi, port->vcpi.pbn, pbn);
-+ if (pbn == port->vcpi.pbn) {
-+ drm_dp_mst_topology_put_port(port);
-+ return true;
-+ }
-+ }
-+
-+ ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots);
-+ if (ret) {
-+ drm_dbg_kms(mgr->dev, "failed to init time slots=%d ret=%d\n",
-+ DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
-+ drm_dp_mst_topology_put_port(port);
-+ goto out;
-+ }
-+ drm_dbg_kms(mgr->dev, "initing vcpi for pbn=%d slots=%d\n", pbn, port->vcpi.num_slots);
-+
-+ /* Keep port allocated until its payload has been removed */
-+ drm_dp_mst_get_port_malloc(port);
-+ drm_dp_mst_topology_put_port(port);
-+ return true;
-+out:
-+ return false;
-+}
-+EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
-+
-+int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
-+{
-+ int slots = 0;
-+
-+ port = drm_dp_mst_topology_get_port_validated(mgr, port);
-+ if (!port)
-+ return slots;
-+
-+ slots = port->vcpi.num_slots;
-+ drm_dp_mst_topology_put_port(port);
-+ return slots;
-+}
-+EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
-+
-+/**
-+ * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
-+ * @mgr: manager for this port
-+ * @port: unverified pointer to a port.
-+ *
-+ * This just resets the number of slots for the ports VCPI for later programming.
-+ */
-+void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
-+{
-+ /*
-+ * A port with VCPI will remain allocated until its VCPI is
-+ * released, no verified ref needed
-+ */
-+
-+ port->vcpi.num_slots = 0;
-+}
-+EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
-+
-+/**
-+ * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
-+ * @mgr: manager for this port
-+ * @port: port to deallocate vcpi for
-+ *
-+ * This can be called unconditionally, regardless of whether
-+ * drm_dp_mst_allocate_vcpi() succeeded or not.
-+ */
-+void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
-+ struct drm_dp_mst_port *port)
-+{
-+ bool skip;
-+
-+ if (!port->vcpi.vcpi)
-+ return;
-+
-+ mutex_lock(&mgr->lock);
-+ skip = !drm_dp_mst_port_downstream_of_branch(port, mgr->mst_primary);
-+ mutex_unlock(&mgr->lock);
-+
-+ if (skip)
-+ return;
-+
-+ drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
-+ port->vcpi.num_slots = 0;
-+ port->vcpi.pbn = 0;
-+ port->vcpi.aligned_pbn = 0;
-+ port->vcpi.vcpi = 0;
-+ drm_dp_mst_put_port_malloc(port);
-+}
-+EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
-+
- static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
-- int id, u8 start_slot, u8 num_slots)
-+ int id, struct drm_dp_payload *payload)
- {
- u8 payload_alloc[3], status;
- int ret;
-@@ -4519,8 +4804,8 @@ static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
- DP_PAYLOAD_TABLE_UPDATED);
-
- payload_alloc[0] = id;
-- payload_alloc[1] = start_slot;
-- payload_alloc[2] = num_slots;
-+ payload_alloc[1] = payload->start_slot;
-+ payload_alloc[2] = payload->num_slots;
-
- ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
- if (ret != 3) {
-@@ -4735,9 +5020,8 @@ static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
- void drm_dp_mst_dump_topology(struct seq_file *m,
- struct drm_dp_mst_topology_mgr *mgr)
- {
-- struct drm_dp_mst_topology_state *state;
-- struct drm_dp_mst_atomic_payload *payload;
-- int i, ret;
-+ int i;
-+ struct drm_dp_mst_port *port;
-
- mutex_lock(&mgr->lock);
- if (mgr->mst_primary)
-@@ -4746,35 +5030,36 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
- /* dump VCPIs */
- mutex_unlock(&mgr->lock);
-
-- ret = drm_modeset_lock_single_interruptible(&mgr->base.lock);
-- if (ret < 0)
-- return;
-+ mutex_lock(&mgr->payload_lock);
-+ seq_printf(m, "\n*** VCPI Info ***\n");
-+ seq_printf(m, "payload_mask: %lx, vcpi_mask: %lx, max_payloads: %d\n", mgr->payload_mask, mgr->vcpi_mask, mgr->max_payloads);
-
-- state = to_drm_dp_mst_topology_state(mgr->base.state);
-- seq_printf(m, "\n*** Atomic state info ***\n");
-- seq_printf(m, "payload_mask: %x, max_payloads: %d, start_slot: %u, pbn_div: %d\n",
-- state->payload_mask, mgr->max_payloads, state->start_slot, state->pbn_div);
--
-- seq_printf(m, "\n| idx | port | vcpi | slots | pbn | dsc | sink name |\n");
-+ seq_printf(m, "\n| idx | port # | vcp_id | # slots | sink name |\n");
- for (i = 0; i < mgr->max_payloads; i++) {
-- list_for_each_entry(payload, &state->payloads, next) {
-+ if (mgr->proposed_vcpis[i]) {
- char name[14];
-
-- if (payload->vcpi != i || payload->delete)
-- continue;
--
-- fetch_monitor_name(mgr, payload->port, name, sizeof(name));
-- seq_printf(m, " %5d %6d %6d %02d - %02d %5d %5s %19s\n",
-+ port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
-+ fetch_monitor_name(mgr, port, name, sizeof(name));
-+ seq_printf(m, "%10d%10d%10d%10d%20s\n",
- i,
-- payload->port->port_num,
-- payload->vcpi,
-- payload->vc_start_slot,
-- payload->vc_start_slot + payload->time_slots - 1,
-- payload->pbn,
-- payload->dsc_enabled ? "Y" : "N",
-+ port->port_num,
-+ port->vcpi.vcpi,
-+ port->vcpi.num_slots,
- (*name != 0) ? name : "Unknown");
-- }
-+ } else
-+ seq_printf(m, "%6d - Unused\n", i);
-+ }
-+ seq_printf(m, "\n*** Payload Info ***\n");
-+ seq_printf(m, "| idx | state | start slot | # slots |\n");
-+ for (i = 0; i < mgr->max_payloads; i++) {
-+ seq_printf(m, "%10d%10d%15d%10d\n",
-+ i,
-+ mgr->payloads[i].payload_state,
-+ mgr->payloads[i].start_slot,
-+ mgr->payloads[i].num_slots);
- }
-+ mutex_unlock(&mgr->payload_lock);
-
- seq_printf(m, "\n*** DPCD Info ***\n");
- mutex_lock(&mgr->lock);
-@@ -4820,7 +5105,7 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
-
- out:
- mutex_unlock(&mgr->lock);
-- drm_modeset_unlock(&mgr->base.lock);
-+
- }
- EXPORT_SYMBOL(drm_dp_mst_dump_topology);
-
-@@ -5141,22 +5426,9 @@ drm_dp_mst_atomic_check_payload_alloc_limits(struct drm_dp_mst_topology_mgr *mgr
- mgr, mst_state, mgr->max_payloads);
- return -EINVAL;
- }
--
-- /* Assign a VCPI */
-- if (!payload->vcpi) {
-- payload->vcpi = ffz(mst_state->payload_mask) + 1;
-- drm_dbg_atomic(mgr->dev, "[MST PORT:%p] assigned VCPI #%d\n",
-- payload->port, payload->vcpi);
-- mst_state->payload_mask |= BIT(payload->vcpi - 1);
-- }
- }
--
-- if (!payload_count)
-- mst_state->pbn_div = 0;
--
-- drm_dbg_atomic(mgr->dev, "[MST MGR:%p] mst state %p TU pbn_div=%d avail=%d used=%d\n",
-- mgr, mst_state, mst_state->pbn_div, avail_slots,
-- mst_state->total_avail_slots - avail_slots);
-+ drm_dbg_atomic(mgr->dev, "[MST MGR:%p] mst state %p TU avail=%d used=%d\n",
-+ mgr, mst_state, avail_slots, mst_state->total_avail_slots - avail_slots);
-
- return 0;
- }
-@@ -5227,6 +5499,7 @@ EXPORT_SYMBOL(drm_dp_mst_add_affected_dsc_crtcs);
- * @state: Pointer to the new drm_atomic_state
- * @port: Pointer to the affected MST Port
- * @pbn: Newly recalculated bw required for link with DSC enabled
-+ * @pbn_div: Divider to calculate correct number of pbn per slot
- * @enable: Boolean flag to enable or disable DSC on the port
- *
- * This function enables DSC on the given Port
-@@ -5237,7 +5510,8 @@ EXPORT_SYMBOL(drm_dp_mst_add_affected_dsc_crtcs);
- */
- int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
- struct drm_dp_mst_port *port,
-- int pbn, bool enable)
-+ int pbn, int pbn_div,
-+ bool enable)
- {
- struct drm_dp_mst_topology_state *mst_state;
- struct drm_dp_mst_atomic_payload *payload;
-@@ -5263,7 +5537,7 @@ int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
- }
-
- if (enable) {
-- time_slots = drm_dp_atomic_find_time_slots(state, port->mgr, port, pbn);
-+ time_slots = drm_dp_atomic_find_time_slots(state, port->mgr, port, pbn, pbn_div);
- drm_dbg_atomic(state->dev,
- "[MST PORT:%p] Enabling DSC flag, reallocating %d time slots on the port\n",
- port, time_slots);
-@@ -5276,7 +5550,6 @@ int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
- return time_slots;
- }
- EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc);
--
- /**
- * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
- * atomic update is valid
-@@ -5334,6 +5607,7 @@ EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);
-
- /**
- * drm_atomic_get_mst_topology_state: get MST topology state
-+ *
- * @state: global atomic state
- * @mgr: MST topology manager, also the private object in this case
- *
-@@ -5352,31 +5626,6 @@ struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_a
- }
- EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
-
--/**
-- * drm_atomic_get_new_mst_topology_state: get new MST topology state in atomic state, if any
-- * @state: global atomic state
-- * @mgr: MST topology manager, also the private object in this case
-- *
-- * This function wraps drm_atomic_get_priv_obj_state() passing in the MST atomic
-- * state vtable so that the private object state returned is that of a MST
-- * topology object.
-- *
-- * Returns:
-- *
-- * The MST topology state, or NULL if there's no topology state for this MST mgr
-- * in the global atomic state
-- */
--struct drm_dp_mst_topology_state *
--drm_atomic_get_new_mst_topology_state(struct drm_atomic_state *state,
-- struct drm_dp_mst_topology_mgr *mgr)
--{
-- struct drm_private_state *priv_state =
-- drm_atomic_get_new_private_obj_state(state, &mgr->base);
--
-- return priv_state ? to_dp_mst_topology_state(priv_state) : NULL;
--}
--EXPORT_SYMBOL(drm_atomic_get_new_mst_topology_state);
--
- /**
- * drm_dp_mst_topology_mgr_init - initialise a topology manager
- * @mgr: manager struct to initialise
-@@ -5384,6 +5633,8 @@ EXPORT_SYMBOL(drm_atomic_get_new_mst_topology_state);
- * @aux: DP helper aux channel to talk to this device
- * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
- * @max_payloads: maximum number of payloads this GPU can source
-+ * @max_lane_count: maximum number of lanes this GPU supports
-+ * @max_link_rate: maximum link rate per lane this GPU supports in kHz
- * @conn_base_id: the connector object ID the MST device is connected to.
- *
- * Return 0 for success, or negative error code on failure
-@@ -5391,12 +5642,14 @@ EXPORT_SYMBOL(drm_atomic_get_new_mst_topology_state);
- int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_device *dev, struct drm_dp_aux *aux,
- int max_dpcd_transaction_bytes, int max_payloads,
-+ int max_lane_count, int max_link_rate,
- int conn_base_id)
- {
- struct drm_dp_mst_topology_state *mst_state;
-
- mutex_init(&mgr->lock);
- mutex_init(&mgr->qlock);
-+ mutex_init(&mgr->payload_lock);
- mutex_init(&mgr->delayed_destroy_lock);
- mutex_init(&mgr->up_req_lock);
- mutex_init(&mgr->probe_lock);
-@@ -5426,7 +5679,19 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
- mgr->aux = aux;
- mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
- mgr->max_payloads = max_payloads;
-+ mgr->max_lane_count = max_lane_count;
-+ mgr->max_link_rate = max_link_rate;
- mgr->conn_base_id = conn_base_id;
-+ if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 ||
-+ max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8)
-+ return -EINVAL;
-+ mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
-+ if (!mgr->payloads)
-+ return -ENOMEM;
-+ mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
-+ if (!mgr->proposed_vcpis)
-+ return -ENOMEM;
-+ set_bit(0, &mgr->payload_mask);
-
- mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);
- if (mst_state == NULL)
-@@ -5459,12 +5724,19 @@ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
- destroy_workqueue(mgr->delayed_destroy_wq);
- mgr->delayed_destroy_wq = NULL;
- }
-+ mutex_lock(&mgr->payload_lock);
-+ kfree(mgr->payloads);
-+ mgr->payloads = NULL;
-+ kfree(mgr->proposed_vcpis);
-+ mgr->proposed_vcpis = NULL;
-+ mutex_unlock(&mgr->payload_lock);
- mgr->dev = NULL;
- mgr->aux = NULL;
- drm_atomic_private_obj_fini(&mgr->base);
- mgr->funcs = NULL;
-
- mutex_destroy(&mgr->delayed_destroy_lock);
-+ mutex_destroy(&mgr->payload_lock);
- mutex_destroy(&mgr->qlock);
- mutex_destroy(&mgr->lock);
- mutex_destroy(&mgr->up_req_lock);
-diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
-index 03604a37931c..e01a40f35284 100644
---- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
-+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
-@@ -52,7 +52,6 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
- struct drm_atomic_state *state = crtc_state->uapi.state;
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
- struct intel_dp *intel_dp = &intel_mst->primary->dp;
-- struct drm_dp_mst_topology_state *mst_state;
- struct intel_connector *connector =
- to_intel_connector(conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
-@@ -60,28 +59,22 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
- &crtc_state->hw.adjusted_mode;
- int bpp, slots = -EINVAL;
-
-- mst_state = drm_atomic_get_mst_topology_state(state, &intel_dp->mst_mgr);
-- if (IS_ERR(mst_state))
-- return PTR_ERR(mst_state);
--
- crtc_state->lane_count = limits->max_lane_count;
- crtc_state->port_clock = limits->max_rate;
-
-- // TODO: Handle pbn_div changes by adding a new MST helper
-- if (!mst_state->pbn_div) {
-- mst_state->pbn_div = drm_dp_get_vc_payload_bw(&intel_dp->mst_mgr,
-- limits->max_rate,
-- limits->max_lane_count);
-- }
--
- for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
-+
- crtc_state->pipe_bpp = bpp;
-
- crtc_state->pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock,
- crtc_state->pipe_bpp,
- false);
- slots = drm_dp_atomic_find_time_slots(state, &intel_dp->mst_mgr,
-- connector->port, crtc_state->pbn);
-+ connector->port,
-+ crtc_state->pbn,
-+ drm_dp_get_vc_payload_bw(&intel_dp->mst_mgr,
-+ crtc_state->port_clock,
-+ crtc_state->lane_count));
- if (slots == -EDEADLK)
- return slots;
- if (slots >= 0)
-@@ -364,17 +357,21 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state,
- struct intel_dp *intel_dp = &dig_port->dp;
- struct intel_connector *connector =
- to_intel_connector(old_conn_state->connector);
-- struct drm_dp_mst_topology_state *mst_state =
-- drm_atomic_get_mst_topology_state(&state->base, &intel_dp->mst_mgr);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
-+ int start_slot = intel_dp_is_uhbr(old_crtc_state) ? 0 : 1;
-+ int ret;
-
- drm_dbg_kms(&i915->drm, "active links %d\n",
- intel_dp->active_mst_links);
-
- intel_hdcp_disable(intel_mst->connector);
-
-- drm_dp_remove_payload(&intel_dp->mst_mgr, mst_state,
-- drm_atomic_get_mst_payload_state(mst_state, connector->port));
-+ drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, connector->port);
-+
-+ ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr, start_slot);
-+ if (ret) {
-+ drm_dbg_kms(&i915->drm, "failed to update payload %d\n", ret);
-+ }
-
- intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
- }
-@@ -402,6 +399,8 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
-
- intel_disable_transcoder(old_crtc_state);
-
-+ drm_dp_update_payload_part2(&intel_dp->mst_mgr);
-+
- clear_act_sent(encoder, old_crtc_state);
-
- intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder),
-@@ -409,6 +408,8 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
-
- wait_for_act_sent(encoder, old_crtc_state);
-
-+ drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, connector->port);
-+
- intel_ddi_disable_transcoder_func(old_crtc_state);
-
- if (DISPLAY_VER(dev_priv) >= 9)
-@@ -475,8 +476,7 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_connector *connector =
- to_intel_connector(conn_state->connector);
-- struct drm_dp_mst_topology_state *mst_state =
-- drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr);
-+ int start_slot = intel_dp_is_uhbr(pipe_config) ? 0 : 1;
- int ret;
- bool first_mst_stream;
-
-@@ -502,13 +502,16 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
- dig_port->base.pre_enable(state, &dig_port->base,
- pipe_config, NULL);
-
-+ ret = drm_dp_mst_allocate_vcpi(&intel_dp->mst_mgr,
-+ connector->port,
-+ pipe_config->pbn,
-+ pipe_config->dp_m_n.tu);
-+ if (!ret)
-+ drm_err(&dev_priv->drm, "failed to allocate vcpi\n");
-+
- intel_dp->active_mst_links++;
-
-- ret = drm_dp_add_payload_part1(&intel_dp->mst_mgr, mst_state,
-- drm_atomic_get_mst_payload_state(mst_state, connector->port));
-- if (ret < 0)
-- drm_err(&dev_priv->drm, "Failed to create MST payload for %s: %d\n",
-- connector->base.name, ret);
-+ ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr, start_slot);
-
- /*
- * Before Gen 12 this is not done as part of
-@@ -531,10 +534,7 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
- struct intel_digital_port *dig_port = intel_mst->primary;
- struct intel_dp *intel_dp = &dig_port->dp;
-- struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-- struct drm_dp_mst_topology_state *mst_state =
-- drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr);
- enum transcoder trans = pipe_config->cpu_transcoder;
-
- drm_WARN_ON(&dev_priv->drm, pipe_config->has_pch_encoder);
-@@ -562,8 +562,7 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
-
- wait_for_act_sent(encoder, pipe_config);
-
-- drm_dp_add_payload_part2(&intel_dp->mst_mgr, &state->base,
-- drm_atomic_get_mst_payload_state(mst_state, connector->port));
-+ drm_dp_update_payload_part2(&intel_dp->mst_mgr);
-
- if (DISPLAY_VER(dev_priv) >= 14 && pipe_config->fec_enable)
- intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(trans), 0,
-@@ -950,6 +949,8 @@ intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id)
- struct intel_dp *intel_dp = &dig_port->dp;
- enum port port = dig_port->base.port;
- int ret;
-+ int max_source_rate =
-+ intel_dp->source_rates[intel_dp->num_source_rates - 1];
-
- if (!HAS_DP_MST(i915) || intel_dp_is_edp(intel_dp))
- return 0;
-@@ -965,7 +966,10 @@ intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id)
- /* create encoders */
- intel_dp_create_fake_mst_encoders(dig_port);
- ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, &i915->drm,
-- &intel_dp->aux, 16, 3, conn_base_id);
-+ &intel_dp->aux, 16, 3,
-+ dig_port->max_lanes,
-+ max_source_rate,
-+ conn_base_id);
- if (ret) {
- intel_dp->mst_mgr.cbs = NULL;
- return ret;
-diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
-index 6406fd487ee5..987e02eea66a 100644
---- a/drivers/gpu/drm/i915/display/intel_hdcp.c
-+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
-@@ -31,30 +31,8 @@
-
- static int intel_conn_to_vcpi(struct intel_connector *connector)
- {
-- struct drm_dp_mst_topology_mgr *mgr;
-- struct drm_dp_mst_atomic_payload *payload;
-- struct drm_dp_mst_topology_state *mst_state;
-- int vcpi = 0;
--
- /* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. */
-- if (!connector->port)
-- return 0;
-- mgr = connector->port->mgr;
--
-- drm_modeset_lock(&mgr->base.lock, NULL);
-- mst_state = to_drm_dp_mst_topology_state(mgr->base.state);
-- payload = drm_atomic_get_mst_payload_state(mst_state, connector->port);
-- if (drm_WARN_ON(mgr->dev, !payload))
-- goto out;
--
-- vcpi = payload->vcpi;
-- if (drm_WARN_ON(mgr->dev, vcpi < 0)) {
-- vcpi = 0;
-- goto out;
-- }
--out:
-- drm_modeset_unlock(&mgr->base.lock);
-- return vcpi;
-+ return connector->port ? connector->port->vcpi.vcpi : 0;
- }
-
- /*
-diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
-index 33c97d510999..8400a5d8ea6e 100644
---- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
-+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
-@@ -932,7 +932,6 @@ struct nv50_msto {
- struct nv50_head *head;
- struct nv50_mstc *mstc;
- bool disabled;
-- bool enabled;
- };
-
- struct nouveau_encoder *nv50_real_outp(struct drm_encoder *encoder)
-@@ -949,36 +948,58 @@ struct nouveau_encoder *nv50_real_outp(struct drm_encoder *encoder)
- }
-
- static void
--nv50_msto_cleanup(struct drm_atomic_state *state,
-- struct drm_dp_mst_topology_state *mst_state,
-- struct drm_dp_mst_topology_mgr *mgr,
-- struct nv50_msto *msto)
-+nv50_msto_cleanup(struct nv50_msto *msto)
- {
- struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
-- struct drm_dp_mst_atomic_payload *payload =
-- drm_atomic_get_mst_payload_state(mst_state, msto->mstc->port);
-+
-+ struct nv50_mstc *mstc = msto->mstc;
-+ struct nv50_mstm *mstm = mstc->mstm;
-+
-+ if (!msto->disabled)
-+ return;
-
- NV_ATOMIC(drm, "%s: msto cleanup\n", msto->encoder.name);
-
-- if (msto->disabled) {
-- msto->mstc = NULL;
-- msto->disabled = false;
-- } else if (msto->enabled) {
-- drm_dp_add_payload_part2(mgr, state, payload);
-- msto->enabled = false;
-+ drm_dp_mst_deallocate_vcpi(&mstm->mgr, mstc->port);
-+
-+ msto->mstc = NULL;
-+ msto->disabled = false;
-+}
-+
-+static struct drm_dp_payload *
-+nv50_msto_payload(struct nv50_msto *msto)
-+{
-+ struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
-+ struct nv50_mstc *mstc = msto->mstc;
-+ struct nv50_mstm *mstm = mstc->mstm;
-+ int vcpi = mstc->port->vcpi.vcpi, i;
-+
-+ WARN_ON(!mutex_is_locked(&mstm->mgr.payload_lock));
-+
-+ NV_ATOMIC(drm, "%s: vcpi %d\n", msto->encoder.name, vcpi);
-+ for (i = 0; i < mstm->mgr.max_payloads; i++) {
-+ struct drm_dp_payload *payload = &mstm->mgr.payloads[i];
-+ NV_ATOMIC(drm, "%s: %d: vcpi %d start 0x%02x slots 0x%02x\n",
-+ mstm->outp->base.base.name, i, payload->vcpi,
-+ payload->start_slot, payload->num_slots);
-+ }
-+
-+ for (i = 0; i < mstm->mgr.max_payloads; i++) {
-+ struct drm_dp_payload *payload = &mstm->mgr.payloads[i];
-+ if (payload->vcpi == vcpi)
-+ return payload;
- }
-+
-+ return NULL;
- }
-
- static void
--nv50_msto_prepare(struct drm_atomic_state *state,
-- struct drm_dp_mst_topology_state *mst_state,
-- struct drm_dp_mst_topology_mgr *mgr,
-- struct nv50_msto *msto)
-+nv50_msto_prepare(struct nv50_msto *msto)
- {
- struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
- struct nv50_mstc *mstc = msto->mstc;
- struct nv50_mstm *mstm = mstc->mstm;
-- struct drm_dp_mst_atomic_payload *payload;
-+ struct drm_dp_payload *payload = NULL;
- struct {
- struct nv50_disp_mthd_v1 base;
- struct nv50_disp_sor_dp_mst_vcpi_v0 vcpi;
-@@ -990,27 +1011,28 @@ nv50_msto_prepare(struct drm_atomic_state *state,
- (0x0100 << msto->head->base.index),
- };
-
-+ mutex_lock(&mstm->mgr.payload_lock);
-+
- NV_ATOMIC(drm, "%s: msto prepare\n", msto->encoder.name);
-
-- payload = drm_atomic_get_mst_payload_state(mst_state, mstc->port);
-+ if (mstc->port->vcpi.vcpi > 0)
-+ payload = nv50_msto_payload(msto);
-
-- // TODO: Figure out if we want to do a better job of handling VCPI allocation failures here?
-- if (msto->disabled) {
-- drm_dp_remove_payload(mgr, mst_state, payload);
-- } else {
-- if (msto->enabled)
-- drm_dp_add_payload_part1(mgr, mst_state, payload);
-+ if (payload) {
-+ NV_ATOMIC(drm, "%s: %s: %02x %02x %04x %04x\n",
-+ msto->encoder.name, msto->head->base.base.name,
-+ payload->start_slot, payload->num_slots,
-+ mstc->port->vcpi.pbn, mstc->port->vcpi.aligned_pbn);
-
-- args.vcpi.start_slot = payload->vc_start_slot;
-- args.vcpi.num_slots = payload->time_slots;
-+ args.vcpi.start_slot = payload->start_slot;
-+ args.vcpi.num_slots = payload->num_slots;
- args.vcpi.pbn = payload->pbn;
-- args.vcpi.aligned_pbn = payload->time_slots * mst_state->pbn_div;
-- }
-+ } else {
-+ NV_ATOMIC(drm, "%s: %s: %02x %02x %04x %04x\n",
-+ msto->encoder.name, msto->head->base.base.name, 0, 0, 0, 0);
-+ }
-
-- NV_ATOMIC(drm, "%s: %s: %02x %02x %04x %04x\n",
-- msto->encoder.name, msto->head->base.base.name,
-- args.vcpi.start_slot, args.vcpi.num_slots,
-- args.vcpi.pbn, args.vcpi.aligned_pbn);
-+ mutex_unlock(&mstm->mgr.payload_lock);
-
- nvif_mthd(&drm->display->disp.object, 0, &args, sizeof(args));
- }
-@@ -1022,7 +1044,6 @@ nv50_msto_atomic_check(struct drm_encoder *encoder,
- {
- struct drm_atomic_state *state = crtc_state->state;
- struct drm_connector *connector = conn_state->connector;
-- struct drm_dp_mst_topology_state *mst_state;
- struct nv50_mstc *mstc = nv50_mstc(connector);
- struct nv50_mstm *mstm = mstc->mstm;
- struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
-@@ -1050,18 +1071,8 @@ nv50_msto_atomic_check(struct drm_encoder *encoder,
- false);
- }
-
-- mst_state = drm_atomic_get_mst_topology_state(state, &mstm->mgr);
-- if (IS_ERR(mst_state))
-- return PTR_ERR(mst_state);
--
-- if (!mst_state->pbn_div) {
-- struct nouveau_encoder *outp = mstc->mstm->outp;
--
-- mst_state->pbn_div = drm_dp_get_vc_payload_bw(&mstm->mgr,
-- outp->dp.link_bw, outp->dp.link_nr);
-- }
--
-- slots = drm_dp_atomic_find_time_slots(state, &mstm->mgr, mstc->port, asyh->dp.pbn);
-+ slots = drm_dp_atomic_find_time_slots(state, &mstm->mgr, mstc->port,
-+ asyh->dp.pbn, 0);
- if (slots < 0)
- return slots;
-
-@@ -1093,6 +1104,7 @@ nv50_msto_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *st
- struct drm_connector *connector;
- struct drm_connector_list_iter conn_iter;
- u8 proto;
-+ bool r;
-
- drm_connector_list_iter_begin(encoder->dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
-@@ -1107,6 +1119,10 @@ nv50_msto_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *st
- if (WARN_ON(!mstc))
- return;
-
-+ r = drm_dp_mst_allocate_vcpi(&mstm->mgr, mstc->port, asyh->dp.pbn, asyh->dp.tu);
-+ if (!r)
-+ DRM_DEBUG_KMS("Failed to allocate VCPI\n");
-+
- if (!mstm->links++)
- nv50_outp_acquire(mstm->outp, false /*XXX: MST audio.*/);
-
-@@ -1119,7 +1135,6 @@ nv50_msto_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *st
- nv50_dp_bpc_to_depth(asyh->or.bpc));
-
- msto->mstc = mstc;
-- msto->enabled = true;
- mstm->modified = true;
- }
-
-@@ -1130,6 +1145,8 @@ nv50_msto_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *s
- struct nv50_mstc *mstc = msto->mstc;
- struct nv50_mstm *mstm = mstc->mstm;
-
-+ drm_dp_mst_reset_vcpi_slots(&mstm->mgr, mstc->port);
-+
- mstm->outp->update(mstm->outp, msto->head->base.index, NULL, 0, 0);
- mstm->modified = true;
- if (!--mstm->links)
-@@ -1349,9 +1366,7 @@ nv50_mstc_new(struct nv50_mstm *mstm, struct drm_dp_mst_port *port,
- }
-
- static void
--nv50_mstm_cleanup(struct drm_atomic_state *state,
-- struct drm_dp_mst_topology_state *mst_state,
-- struct nv50_mstm *mstm)
-+nv50_mstm_cleanup(struct nv50_mstm *mstm)
- {
- struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev);
- struct drm_encoder *encoder;
-@@ -1359,12 +1374,14 @@ nv50_mstm_cleanup(struct drm_atomic_state *state,
- NV_ATOMIC(drm, "%s: mstm cleanup\n", mstm->outp->base.base.name);
- drm_dp_check_act_status(&mstm->mgr);
-
-+ drm_dp_update_payload_part2(&mstm->mgr);
-+
- drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
- if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
- struct nv50_msto *msto = nv50_msto(encoder);
- struct nv50_mstc *mstc = msto->mstc;
- if (mstc && mstc->mstm == mstm)
-- nv50_msto_cleanup(state, mst_state, &mstm->mgr, msto);
-+ nv50_msto_cleanup(msto);
- }
- }
-
-@@ -1372,34 +1389,20 @@ nv50_mstm_cleanup(struct drm_atomic_state *state,
- }
-
- static void
--nv50_mstm_prepare(struct drm_atomic_state *state,
-- struct drm_dp_mst_topology_state *mst_state,
-- struct nv50_mstm *mstm)
-+nv50_mstm_prepare(struct nv50_mstm *mstm)
- {
- struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev);
- struct drm_encoder *encoder;
-
- NV_ATOMIC(drm, "%s: mstm prepare\n", mstm->outp->base.base.name);
-+ drm_dp_update_payload_part1(&mstm->mgr, 1);
-
-- /* Disable payloads first */
-- drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
-- if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
-- struct nv50_msto *msto = nv50_msto(encoder);
-- struct nv50_mstc *mstc = msto->mstc;
-- if (mstc && mstc->mstm == mstm && msto->disabled)
-- nv50_msto_prepare(state, mst_state, &mstm->mgr, msto);
-- }
-- }
--
-- /* Add payloads for new heads, while also updating the start slots of any unmodified (but
-- * active) heads that may have had their VC slots shifted left after the previous step
-- */
- drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
- if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
- struct nv50_msto *msto = nv50_msto(encoder);
- struct nv50_mstc *mstc = msto->mstc;
-- if (mstc && mstc->mstm == mstm && !msto->disabled)
-- nv50_msto_prepare(state, mst_state, &mstm->mgr, msto);
-+ if (mstc && mstc->mstm == mstm)
-+ nv50_msto_prepare(msto);
- }
- }
-
-@@ -1596,7 +1599,9 @@ nv50_mstm_new(struct nouveau_encoder *outp, struct drm_dp_aux *aux, int aux_max,
- mstm->mgr.cbs = &nv50_mstm;
-
- ret = drm_dp_mst_topology_mgr_init(&mstm->mgr, dev, aux, aux_max,
-- max_payloads, conn_base_id);
-+ max_payloads, outp->dcb->dpconf.link_nr,
-+ drm_dp_bw_code_to_link_rate(outp->dcb->dpconf.link_bw),
-+ conn_base_id);
- if (ret)
- return ret;
-
-@@ -2048,20 +2053,20 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
- static void
- nv50_disp_atomic_commit_core(struct drm_atomic_state *state, u32 *interlock)
- {
-- struct drm_dp_mst_topology_mgr *mgr;
-- struct drm_dp_mst_topology_state *mst_state;
- struct nouveau_drm *drm = nouveau_drm(state->dev);
- struct nv50_disp *disp = nv50_disp(drm->dev);
- struct nv50_core *core = disp->core;
- struct nv50_mstm *mstm;
-- int i;
-+ struct drm_encoder *encoder;
-
- NV_ATOMIC(drm, "commit core %08x\n", interlock[NV50_DISP_INTERLOCK_BASE]);
-
-- for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
-- mstm = nv50_mstm(mgr);
-- if (mstm->modified)
-- nv50_mstm_prepare(state, mst_state, mstm);
-+ drm_for_each_encoder(encoder, drm->dev) {
-+ if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
-+ mstm = nouveau_encoder(encoder)->dp.mstm;
-+ if (mstm && mstm->modified)
-+ nv50_mstm_prepare(mstm);
-+ }
- }
-
- core->func->ntfy_init(disp->sync, NV50_DISP_CORE_NTFY);
-@@ -2070,10 +2075,12 @@ nv50_disp_atomic_commit_core(struct drm_atomic_state *state, u32 *interlock)
- disp->core->chan.base.device))
- NV_ERROR(drm, "core notifier timeout\n");
-
-- for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
-- mstm = nv50_mstm(mgr);
-- if (mstm->modified)
-- nv50_mstm_cleanup(state, mst_state, mstm);
-+ drm_for_each_encoder(encoder, drm->dev) {
-+ if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
-+ mstm = nouveau_encoder(encoder)->dp.mstm;
-+ if (mstm && mstm->modified)
-+ nv50_mstm_cleanup(mstm);
-+ }
- }
- }
-
-diff --git a/include/drm/display/drm_dp_mst_helper.h b/include/drm/display/drm_dp_mst_helper.h
-index 41fd8352ab65..1d2f77835de5 100644
---- a/include/drm/display/drm_dp_mst_helper.h
-+++ b/include/drm/display/drm_dp_mst_helper.h
-@@ -48,6 +48,20 @@ struct drm_dp_mst_topology_ref_history {
-
- struct drm_dp_mst_branch;
-
-+/**
-+ * struct drm_dp_vcpi - Virtual Channel Payload Identifier
-+ * @vcpi: Virtual channel ID.
-+ * @pbn: Payload Bandwidth Number for this channel
-+ * @aligned_pbn: PBN aligned with slot size
-+ * @num_slots: number of slots for this PBN
-+ */
-+struct drm_dp_vcpi {
-+ int vcpi;
-+ int pbn;
-+ int aligned_pbn;
-+ int num_slots;
-+};
-+
- /**
- * struct drm_dp_mst_port - MST port
- * @port_num: port number
-@@ -131,6 +145,7 @@ struct drm_dp_mst_port {
- struct drm_dp_aux *passthrough_aux;
- struct drm_dp_mst_branch *parent;
-
-+ struct drm_dp_vcpi vcpi;
- struct drm_connector *connector;
- struct drm_dp_mst_topology_mgr *mgr;
-
-@@ -515,6 +530,20 @@ struct drm_dp_mst_topology_cbs {
- void (*poll_hpd_irq)(struct drm_dp_mst_topology_mgr *mgr);
- };
-
-+#define DP_MAX_PAYLOAD (sizeof(unsigned long) * 8)
-+
-+#define DP_PAYLOAD_LOCAL 1
-+#define DP_PAYLOAD_REMOTE 2
-+#define DP_PAYLOAD_DELETE_LOCAL 3
-+
-+struct drm_dp_payload {
-+ int payload_state;
-+ int start_slot;
-+ int num_slots;
-+ int vcpi;
-+ int pbn;
-+};
-+
- #define to_dp_mst_topology_state(x) container_of(x, struct drm_dp_mst_topology_state, base)
-
- /**
-@@ -527,34 +556,6 @@ struct drm_dp_mst_atomic_payload {
- /** @port: The MST port assigned to this payload */
- struct drm_dp_mst_port *port;
-
-- /**
-- * @vc_start_slot: The time slot that this payload starts on. Because payload start slots
-- * can't be determined ahead of time, the contents of this value are UNDEFINED at atomic
-- * check time. This shouldn't usually matter, as the start slot should never be relevant for
-- * atomic state computations.
-- *
-- * Since this value is determined at commit time instead of check time, this value is
-- * protected by the MST helpers ensuring that async commits operating on the given topology
-- * never run in parallel. In the event that a driver does need to read this value (e.g. to
-- * inform hardware of the starting timeslot for a payload), the driver may either:
-- *
-- * * Read this field during the atomic commit after
-- * drm_dp_mst_atomic_wait_for_dependencies() has been called, which will ensure the
-- * previous MST states payload start slots have been copied over to the new state. Note
-- * that a new start slot won't be assigned/removed from this payload until
-- * drm_dp_add_payload_part1()/drm_dp_remove_payload() have been called.
-- * * Acquire the MST modesetting lock, and then wait for any pending MST-related commits to
-- * get committed to hardware by calling drm_crtc_commit_wait() on each of the
-- * &drm_crtc_commit structs in &drm_dp_mst_topology_state.commit_deps.
-- *
-- * If neither of the two above solutions suffice (e.g. the driver needs to read the start
-- * slot in the middle of an atomic commit without waiting for some reason), then drivers
-- * should cache this value themselves after changing payloads.
-- */
-- s8 vc_start_slot;
--
-- /** @vcpi: The Virtual Channel Payload Identifier */
-- u8 vcpi;
- /**
- * @time_slots:
- * The number of timeslots allocated to this payload from the source DP Tx to
-@@ -582,6 +583,8 @@ struct drm_dp_mst_topology_state {
- /** @base: Base private state for atomic */
- struct drm_private_state base;
-
-+ /** @payloads: The list of payloads being created/destroyed in this state */
-+ struct list_head payloads;
- /** @mgr: The topology manager */
- struct drm_dp_mst_topology_mgr *mgr;
-
-@@ -598,21 +601,10 @@ struct drm_dp_mst_topology_state {
- /** @num_commit_deps: The number of CRTC commits in @commit_deps */
- size_t num_commit_deps;
-
-- /** @payload_mask: A bitmask of allocated VCPIs, used for VCPI assignments */
-- u32 payload_mask;
-- /** @payloads: The list of payloads being created/destroyed in this state */
-- struct list_head payloads;
--
- /** @total_avail_slots: The total number of slots this topology can handle (63 or 64) */
- u8 total_avail_slots;
- /** @start_slot: The first usable time slot in this topology (1 or 0) */
- u8 start_slot;
--
-- /**
-- * @pbn_div: The current PBN divisor for this topology. The driver is expected to fill this
-- * out itself.
-- */
-- int pbn_div;
- };
-
- #define to_dp_mst_topology_mgr(x) container_of(x, struct drm_dp_mst_topology_mgr, base)
-@@ -652,6 +644,14 @@ struct drm_dp_mst_topology_mgr {
- * @max_payloads: maximum number of payloads the GPU can generate.
- */
- int max_payloads;
-+ /**
-+ * @max_lane_count: maximum number of lanes the GPU can drive.
-+ */
-+ int max_lane_count;
-+ /**
-+ * @max_link_rate: maximum link rate per lane GPU can output, in kHz.
-+ */
-+ int max_link_rate;
- /**
- * @conn_base_id: DRM connector ID this mgr is connected to. Only used
- * to build the MST connector path value.
-@@ -694,20 +694,6 @@ struct drm_dp_mst_topology_mgr {
- */
- bool payload_id_table_cleared : 1;
-
-- /**
-- * @payload_count: The number of currently active payloads in hardware. This value is only
-- * intended to be used internally by MST helpers for payload tracking, and is only safe to
-- * read/write from the atomic commit (not check) context.
-- */
-- u8 payload_count;
--
-- /**
-- * @next_start_slot: The starting timeslot to use for new VC payloads. This value is used
-- * internally by MST helpers for payload tracking, and is only safe to read/write from the
-- * atomic commit (not check) context.
-- */
-- u8 next_start_slot;
--
- /**
- * @mst_primary: Pointer to the primary/first branch device.
- */
-@@ -721,6 +707,10 @@ struct drm_dp_mst_topology_mgr {
- * @sink_count: Sink count from DEVICE_SERVICE_IRQ_VECTOR_ESI0.
- */
- u8 sink_count;
-+ /**
-+ * @pbn_div: PBN to slots divisor.
-+ */
-+ int pbn_div;
-
- /**
- * @funcs: Atomic helper callbacks
-@@ -737,6 +727,32 @@ struct drm_dp_mst_topology_mgr {
- */
- struct list_head tx_msg_downq;
-
-+ /**
-+ * @payload_lock: Protect payload information.
-+ */
-+ struct mutex payload_lock;
-+ /**
-+ * @proposed_vcpis: Array of pointers for the new VCPI allocation. The
-+ * VCPI structure itself is &drm_dp_mst_port.vcpi, and the size of
-+ * this array is determined by @max_payloads.
-+ */
-+ struct drm_dp_vcpi **proposed_vcpis;
-+ /**
-+ * @payloads: Array of payloads. The size of this array is determined
-+ * by @max_payloads.
-+ */
-+ struct drm_dp_payload *payloads;
-+ /**
-+ * @payload_mask: Elements of @payloads actually in use. Since
-+ * reallocation of active outputs isn't possible gaps can be created by
-+ * disabling outputs out of order compared to how they've been enabled.
-+ */
-+ unsigned long payload_mask;
-+ /**
-+ * @vcpi_mask: Similar to @payload_mask, but for @proposed_vcpis.
-+ */
-+ unsigned long vcpi_mask;
-+
- /**
- * @tx_waitq: Wait to queue stall for the tx worker.
- */
-@@ -808,7 +824,9 @@ struct drm_dp_mst_topology_mgr {
- int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_device *dev, struct drm_dp_aux *aux,
- int max_dpcd_transaction_bytes,
-- int max_payloads, int conn_base_id);
-+ int max_payloads,
-+ int max_lane_count, int max_link_rate,
-+ int conn_base_id);
-
- void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr);
-
-@@ -831,17 +849,28 @@ int drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr,
-
- int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc);
-
-+bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
-+ struct drm_dp_mst_port *port, int pbn, int slots);
-+
-+int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
-+
-+
-+void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
-+
- void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_t link_encoding_cap);
-
--int drm_dp_add_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
-- struct drm_dp_mst_topology_state *mst_state,
-- struct drm_dp_mst_atomic_payload *payload);
--int drm_dp_add_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
-- struct drm_atomic_state *state,
-- struct drm_dp_mst_atomic_payload *payload);
--void drm_dp_remove_payload(struct drm_dp_mst_topology_mgr *mgr,
-- struct drm_dp_mst_topology_state *mst_state,
-- struct drm_dp_mst_atomic_payload *payload);
-+void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
-+ struct drm_dp_mst_port *port);
-+
-+
-+int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
-+ int pbn);
-+
-+
-+int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr, int start_slot);
-+
-+
-+int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr);
-
- int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr);
-
-@@ -863,22 +892,17 @@ int drm_dp_mst_connector_late_register(struct drm_connector *connector,
- void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
- struct drm_dp_mst_port *port);
-
--struct drm_dp_mst_topology_state *
--drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
-- struct drm_dp_mst_topology_mgr *mgr);
--struct drm_dp_mst_topology_state *
--drm_atomic_get_new_mst_topology_state(struct drm_atomic_state *state,
-- struct drm_dp_mst_topology_mgr *mgr);
--struct drm_dp_mst_atomic_payload *
--drm_atomic_get_mst_payload_state(struct drm_dp_mst_topology_state *state,
-- struct drm_dp_mst_port *port);
-+struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
-+ struct drm_dp_mst_topology_mgr *mgr);
- int __must_check
- drm_dp_atomic_find_time_slots(struct drm_atomic_state *state,
- struct drm_dp_mst_topology_mgr *mgr,
-- struct drm_dp_mst_port *port, int pbn);
-+ struct drm_dp_mst_port *port, int pbn,
-+ int pbn_div);
- int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
- struct drm_dp_mst_port *port,
-- int pbn, bool enable);
-+ int pbn, int pbn_div,
-+ bool enable);
- int __must_check
- drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state,
- struct drm_dp_mst_topology_mgr *mgr);
-@@ -902,12 +926,6 @@ void drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port);
-
- struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port);
-
--static inline struct drm_dp_mst_topology_state *
--to_drm_dp_mst_topology_state(struct drm_private_state *state)
--{
-- return container_of(state, struct drm_dp_mst_topology_state, base);
--}
--
- extern const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs;
-
- /**
---
-2.39.0
-
diff --git a/0003-btrfs-fix-invalid-leaf-access-due-to-inline-extent-d.patch b/0003-btrfs-fix-invalid-leaf-access-due-to-inline-extent-d.patch
deleted file mode 100644
index 7d3468b..0000000
--- a/0003-btrfs-fix-invalid-leaf-access-due-to-inline-extent-d.patch
+++ /dev/null
@@ -1,67 +0,0 @@
-From 0a772f0e9788d760313382ec21b81dca83515966 Mon Sep 17 00:00:00 2001
-From: Filipe Manana <fdmanana@suse.com>
-Date: Thu, 12 Jan 2023 14:17:20 +0000
-Subject: [PATCH 3/5] btrfs: fix invalid leaf access due to inline extent
- during lseek
-
-During lseek, for SEEK_DATA and SEEK_HOLE modes, we access the disk_bytenr
-of an extent without checking its type. However inline extents have their
-data starting at the offset of the disk_bytenr field, so accessing that field
-when we have an inline extent can result in either of the following:
-
-1) Interpret the inline extent's data as a disk_bytenr value;
-
-2) In case the inline data is less than 8 bytes, we access part of some
- other item in the leaf, or unused space in the leaf;
-
-3) In case the inline data is less than 8 bytes and the extent item is
- the first item in the leaf, we can access beyond the leaf's limit.
-
-So fix this by not accessing the disk_bytenr field if we have an inline
-extent.
-
-Fixes: b6e833567ea1 ("btrfs: make hole and data seeking a lot more efficient")
-Reported-by: Matthias Schoepfer <matthias.schoepfer@googlemail.com>
-Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=216908
-Link: https://lore.kernel.org/linux-btrfs/7f25442f-b121-2a3a-5a3d-22bcaae83cd4@leemhuis.info/
-Signed-off-by: Filipe Manana <fdmanana@suse.com>
-Cherry-picked-for: https://bugs.archlinux.org/task/77041
----
- fs/btrfs/file.c | 13 ++++++++++---
- 1 file changed, 10 insertions(+), 3 deletions(-)
-
-diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
-index 9bef8eaa074a..23056d9914d8 100644
---- a/fs/btrfs/file.c
-+++ b/fs/btrfs/file.c
-@@ -3838,6 +3838,7 @@ static loff_t find_desired_extent(struct btrfs_inode *inode, loff_t offset,
- struct extent_buffer *leaf = path->nodes[0];
- struct btrfs_file_extent_item *extent;
- u64 extent_end;
-+ u8 type;
-
- if (path->slots[0] >= btrfs_header_nritems(leaf)) {
- ret = btrfs_next_leaf(root, path);
-@@ -3892,10 +3893,16 @@ static loff_t find_desired_extent(struct btrfs_inode *inode, loff_t offset,
-
- extent = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_file_extent_item);
-+ type = btrfs_file_extent_type(leaf, extent);
-
-- if (btrfs_file_extent_disk_bytenr(leaf, extent) == 0 ||
-- btrfs_file_extent_type(leaf, extent) ==
-- BTRFS_FILE_EXTENT_PREALLOC) {
-+ /*
-+ * Can't access the extent's disk_bytenr field if this is an
-+ * inline extent, since at that offset, it's where the extent
-+ * data starts.
-+ */
-+ if (type == BTRFS_FILE_EXTENT_PREALLOC ||
-+ (type == BTRFS_FILE_EXTENT_REG &&
-+ btrfs_file_extent_disk_bytenr(leaf, extent) == 0)) {
- /*
- * Explicit hole or prealloc extent, search for delalloc.
- * A prealloc extent is treated like a hole.
---
-2.39.0
-
diff --git a/0004-wifi-mac80211-fix-initialization-of-rx-link-and-rx-l.patch b/0004-wifi-mac80211-fix-initialization-of-rx-link-and-rx-l.patch
deleted file mode 100644
index 0820c6c..0000000
--- a/0004-wifi-mac80211-fix-initialization-of-rx-link-and-rx-l.patch
+++ /dev/null
@@ -1,459 +0,0 @@
-From 93753514870b99ede0d3d94e176e3c35f55aab40 Mon Sep 17 00:00:00 2001
-From: Felix Fietkau <nbd@nbd.name>
-Date: Fri, 30 Dec 2022 21:07:47 +0100
-Subject: [PATCH 4/5] wifi: mac80211: fix initialization of rx->link and
- rx->link_sta
-
-There are some codepaths that do not initialize rx->link_sta properly. This
-causes a crash in places which assume that rx->link_sta is valid if rx->sta
-is valid.
-One known instance is triggered by __ieee80211_rx_h_amsdu being called from
-fast-rx. It results in a crash like this one:
-
- BUG: kernel NULL pointer dereference, address: 00000000000000a8
- #PF: supervisor write access in kernel mode
- #PF: error_code(0x0002) - not-present page PGD 0 P4D 0
- Oops: 0002 [#1] PREEMPT SMP PTI
- CPU: 1 PID: 506 Comm: mt76-usb-rx phy Tainted: G E 6.1.0-debian64x+1.7 #3
- Hardware name: ZOTAC ZBOX-ID92/ZBOX-IQ01/ZBOX-ID92/ZBOX-IQ01, BIOS B220P007 05/21/2014
- RIP: 0010:ieee80211_deliver_skb+0x62/0x1f0 [mac80211]
- Code: 00 48 89 04 24 e8 9e a7 c3 df 89 c0 48 03 1c c5 a0 ea 39 a1 4c 01 6b 08 48 ff 03 48
- 83 7d 28 00 74 11 48 8b 45 30 48 63 55 44 <48> 83 84 d0 a8 00 00 00 01 41 8b 86 c0
- 11 00 00 8d 50 fd 83 fa 01
- RSP: 0018:ffff999040803b10 EFLAGS: 00010286
- RAX: 0000000000000000 RBX: ffffb9903f496480 RCX: 0000000000000000
- RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000000
- RBP: ffff999040803ce0 R08: 0000000000000000 R09: 0000000000000000
- R10: 0000000000000000 R11: 0000000000000000 R12: ffff8d21828ac900
- R13: 000000000000004a R14: ffff8d2198ed89c0 R15: ffff8d2198ed8000
- FS: 0000000000000000(0000) GS:ffff8d24afe80000(0000) knlGS:0000000000000000
- CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
- CR2: 00000000000000a8 CR3: 0000000429810002 CR4: 00000000001706e0
- Call Trace:
- <TASK>
- __ieee80211_rx_h_amsdu+0x1b5/0x240 [mac80211]
- ? ieee80211_prepare_and_rx_handle+0xcdd/0x1320 [mac80211]
- ? __local_bh_enable_ip+0x3b/0xa0
- ieee80211_prepare_and_rx_handle+0xcdd/0x1320 [mac80211]
- ? prepare_transfer+0x109/0x1a0 [xhci_hcd]
- ieee80211_rx_list+0xa80/0xda0 [mac80211]
- mt76_rx_complete+0x207/0x2e0 [mt76]
- mt76_rx_poll_complete+0x357/0x5a0 [mt76]
- mt76u_rx_worker+0x4f5/0x600 [mt76_usb]
- ? mt76_get_min_avg_rssi+0x140/0x140 [mt76]
- __mt76_worker_fn+0x50/0x80 [mt76]
- kthread+0xed/0x120
- ? kthread_complete_and_exit+0x20/0x20
- ret_from_fork+0x22/0x30
-
-Since the initialization of rx->link and rx->link_sta is rather convoluted
-and duplicated in many places, clean it up by using a helper function to
-set it.
-
-Fixes: ccdde7c74ffd ("wifi: mac80211: properly implement MLO key handling")
-Fixes: b320d6c456ff ("wifi: mac80211: use correct rx link_sta instead of default")
-Signed-off-by: Felix Fietkau <nbd@nbd.name>
-Link: https://lore.kernel.org/r/20221230200747.19040-1-nbd@nbd.name
-[remove unnecessary rx->sta->sta.mlo check]
-Cc: stable@vger.kernel.org
-Signed-off-by: Johannes Berg <johannes.berg@intel.com>
-Cherry-picked-for: https://bugs.archlinux.org/task/76922
----
- net/mac80211/rx.c | 222 +++++++++++++++++++++-------------------------
- 1 file changed, 99 insertions(+), 123 deletions(-)
-
-diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
-index f99416d2e144..3262ebb24092 100644
---- a/net/mac80211/rx.c
-+++ b/net/mac80211/rx.c
-@@ -4070,6 +4070,58 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
- #undef CALL_RXH
- }
-
-+static bool
-+ieee80211_rx_is_valid_sta_link_id(struct ieee80211_sta *sta, u8 link_id)
-+{
-+ if (!sta->mlo)
-+ return false;
-+
-+ return !!(sta->valid_links & BIT(link_id));
-+}
-+
-+static bool ieee80211_rx_data_set_link(struct ieee80211_rx_data *rx,
-+ u8 link_id)
-+{
-+ rx->link_id = link_id;
-+ rx->link = rcu_dereference(rx->sdata->link[link_id]);
-+
-+ if (!rx->sta)
-+ return rx->link;
-+
-+ if (!ieee80211_rx_is_valid_sta_link_id(&rx->sta->sta, link_id))
-+ return false;
-+
-+ rx->link_sta = rcu_dereference(rx->sta->link[link_id]);
-+
-+ return rx->link && rx->link_sta;
-+}
-+
-+static bool ieee80211_rx_data_set_sta(struct ieee80211_rx_data *rx,
-+ struct ieee80211_sta *pubsta,
-+ int link_id)
-+{
-+ struct sta_info *sta;
-+
-+ sta = container_of(pubsta, struct sta_info, sta);
-+
-+ rx->link_id = link_id;
-+ rx->sta = sta;
-+
-+ if (sta) {
-+ rx->local = sta->sdata->local;
-+ if (!rx->sdata)
-+ rx->sdata = sta->sdata;
-+ rx->link_sta = &sta->deflink;
-+ }
-+
-+ if (link_id < 0)
-+ rx->link = &rx->sdata->deflink;
-+ else if (!ieee80211_rx_data_set_link(rx, link_id))
-+ return false;
-+
-+ return true;
-+}
-+
- /*
- * This function makes calls into the RX path, therefore
- * it has to be invoked under RCU read lock.
-@@ -4078,16 +4130,19 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
- {
- struct sk_buff_head frames;
- struct ieee80211_rx_data rx = {
-- .sta = sta,
-- .sdata = sta->sdata,
-- .local = sta->local,
- /* This is OK -- must be QoS data frame */
- .security_idx = tid,
- .seqno_idx = tid,
-- .link_id = -1,
- };
- struct tid_ampdu_rx *tid_agg_rx;
-- u8 link_id;
-+ int link_id = -1;
-+
-+ /* FIXME: statistics won't be right with this */
-+ if (sta->sta.valid_links)
-+ link_id = ffs(sta->sta.valid_links) - 1;
-+
-+ if (!ieee80211_rx_data_set_sta(&rx, &sta->sta, link_id))
-+ return;
-
- tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
- if (!tid_agg_rx)
-@@ -4107,10 +4162,6 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
- };
- drv_event_callback(rx.local, rx.sdata, &event);
- }
-- /* FIXME: statistics won't be right with this */
-- link_id = sta->sta.valid_links ? ffs(sta->sta.valid_links) - 1 : 0;
-- rx.link = rcu_dereference(sta->sdata->link[link_id]);
-- rx.link_sta = rcu_dereference(sta->link[link_id]);
-
- ieee80211_rx_handlers(&rx, &frames);
- }
-@@ -4126,7 +4177,6 @@ void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid,
- /* This is OK -- must be QoS data frame */
- .security_idx = tid,
- .seqno_idx = tid,
-- .link_id = -1,
- };
- int i, diff;
-
-@@ -4137,10 +4187,8 @@ void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid,
-
- sta = container_of(pubsta, struct sta_info, sta);
-
-- rx.sta = sta;
-- rx.sdata = sta->sdata;
-- rx.link = &rx.sdata->deflink;
-- rx.local = sta->local;
-+ if (!ieee80211_rx_data_set_sta(&rx, pubsta, -1))
-+ return;
-
- rcu_read_lock();
- tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
-@@ -4527,15 +4575,6 @@ void ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata)
- mutex_unlock(&local->sta_mtx);
- }
-
--static bool
--ieee80211_rx_is_valid_sta_link_id(struct ieee80211_sta *sta, u8 link_id)
--{
-- if (!sta->mlo)
-- return false;
--
-- return !!(sta->valid_links & BIT(link_id));
--}
--
- static void ieee80211_rx_8023(struct ieee80211_rx_data *rx,
- struct ieee80211_fast_rx *fast_rx,
- int orig_len)
-@@ -4646,7 +4685,6 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
- struct sk_buff *skb = rx->skb;
- struct ieee80211_hdr *hdr = (void *)skb->data;
- struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
-- struct sta_info *sta = rx->sta;
- int orig_len = skb->len;
- int hdrlen = ieee80211_hdrlen(hdr->frame_control);
- int snap_offs = hdrlen;
-@@ -4658,7 +4696,6 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
- u8 da[ETH_ALEN];
- u8 sa[ETH_ALEN];
- } addrs __aligned(2);
-- struct link_sta_info *link_sta;
- struct ieee80211_sta_rx_stats *stats;
-
- /* for parallel-rx, we need to have DUP_VALIDATED, otherwise we write
-@@ -4761,18 +4798,10 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
- drop:
- dev_kfree_skb(skb);
-
-- if (rx->link_id >= 0) {
-- link_sta = rcu_dereference(sta->link[rx->link_id]);
-- if (!link_sta)
-- return true;
-- } else {
-- link_sta = &sta->deflink;
-- }
--
- if (fast_rx->uses_rss)
-- stats = this_cpu_ptr(link_sta->pcpu_rx_stats);
-+ stats = this_cpu_ptr(rx->link_sta->pcpu_rx_stats);
- else
-- stats = &link_sta->rx_stats;
-+ stats = &rx->link_sta->rx_stats;
-
- stats->dropped++;
- return true;
-@@ -4790,8 +4819,8 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
- struct ieee80211_local *local = rx->local;
- struct ieee80211_sub_if_data *sdata = rx->sdata;
- struct ieee80211_hdr *hdr = (void *)skb->data;
-- struct link_sta_info *link_sta = NULL;
-- struct ieee80211_link_data *link;
-+ struct link_sta_info *link_sta = rx->link_sta;
-+ struct ieee80211_link_data *link = rx->link;
-
- rx->skb = skb;
-
-@@ -4813,35 +4842,6 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
- if (!ieee80211_accept_frame(rx))
- return false;
-
-- if (rx->link_id >= 0) {
-- link = rcu_dereference(rx->sdata->link[rx->link_id]);
--
-- /* we might race link removal */
-- if (!link)
-- return true;
-- rx->link = link;
--
-- if (rx->sta) {
-- rx->link_sta =
-- rcu_dereference(rx->sta->link[rx->link_id]);
-- if (!rx->link_sta)
-- return true;
-- }
-- } else {
-- if (rx->sta)
-- rx->link_sta = &rx->sta->deflink;
--
-- rx->link = &sdata->deflink;
-- }
--
-- if (unlikely(!is_multicast_ether_addr(hdr->addr1) &&
-- rx->link_id >= 0 && rx->sta && rx->sta->sta.mlo)) {
-- link_sta = rcu_dereference(rx->sta->link[rx->link_id]);
--
-- if (WARN_ON_ONCE(!link_sta))
-- return true;
-- }
--
- if (!consume) {
- struct skb_shared_hwtstamps *shwt;
-
-@@ -4861,7 +4861,7 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
- shwt->hwtstamp = skb_hwtstamps(skb)->hwtstamp;
- }
-
-- if (unlikely(link_sta)) {
-+ if (unlikely(rx->sta && rx->sta->sta.mlo)) {
- /* translate to MLD addresses */
- if (ether_addr_equal(link->conf->addr, hdr->addr1))
- ether_addr_copy(hdr->addr1, rx->sdata->vif.addr);
-@@ -4891,6 +4891,7 @@ static void __ieee80211_rx_handle_8023(struct ieee80211_hw *hw,
- struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
- struct ieee80211_fast_rx *fast_rx;
- struct ieee80211_rx_data rx;
-+ int link_id = -1;
-
- memset(&rx, 0, sizeof(rx));
- rx.skb = skb;
-@@ -4907,12 +4908,8 @@ static void __ieee80211_rx_handle_8023(struct ieee80211_hw *hw,
- if (!pubsta)
- goto drop;
-
-- rx.sta = container_of(pubsta, struct sta_info, sta);
-- rx.sdata = rx.sta->sdata;
--
-- if (status->link_valid &&
-- !ieee80211_rx_is_valid_sta_link_id(pubsta, status->link_id))
-- goto drop;
-+ if (status->link_valid)
-+ link_id = status->link_id;
-
- /*
- * TODO: Should the frame be dropped if the right link_id is not
-@@ -4921,19 +4918,8 @@ static void __ieee80211_rx_handle_8023(struct ieee80211_hw *hw,
- * link_id is used only for stats purpose and updating the stats on
- * the deflink is fine?
- */
-- if (status->link_valid)
-- rx.link_id = status->link_id;
--
-- if (rx.link_id >= 0) {
-- struct ieee80211_link_data *link;
--
-- link = rcu_dereference(rx.sdata->link[rx.link_id]);
-- if (!link)
-- goto drop;
-- rx.link = link;
-- } else {
-- rx.link = &rx.sdata->deflink;
-- }
-+ if (!ieee80211_rx_data_set_sta(&rx, pubsta, link_id))
-+ goto drop;
-
- fast_rx = rcu_dereference(rx.sta->fast_rx);
- if (!fast_rx)
-@@ -4951,6 +4937,8 @@ static bool ieee80211_rx_for_interface(struct ieee80211_rx_data *rx,
- {
- struct link_sta_info *link_sta;
- struct ieee80211_hdr *hdr = (void *)skb->data;
-+ struct sta_info *sta;
-+ int link_id = -1;
-
- /*
- * Look up link station first, in case there's a
-@@ -4960,24 +4948,19 @@ static bool ieee80211_rx_for_interface(struct ieee80211_rx_data *rx,
- */
- link_sta = link_sta_info_get_bss(rx->sdata, hdr->addr2);
- if (link_sta) {
-- rx->sta = link_sta->sta;
-- rx->link_id = link_sta->link_id;
-+ sta = link_sta->sta;
-+ link_id = link_sta->link_id;
- } else {
- struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
-
-- rx->sta = sta_info_get_bss(rx->sdata, hdr->addr2);
-- if (rx->sta) {
-- if (status->link_valid &&
-- !ieee80211_rx_is_valid_sta_link_id(&rx->sta->sta,
-- status->link_id))
-- return false;
--
-- rx->link_id = status->link_valid ? status->link_id : -1;
-- } else {
-- rx->link_id = -1;
-- }
-+ sta = sta_info_get_bss(rx->sdata, hdr->addr2);
-+ if (status->link_valid)
-+ link_id = status->link_id;
- }
-
-+ if (!ieee80211_rx_data_set_sta(rx, &sta->sta, link_id))
-+ return false;
-+
- return ieee80211_prepare_and_rx_handle(rx, skb, consume);
- }
-
-@@ -5036,19 +5019,15 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
-
- if (ieee80211_is_data(fc)) {
- struct sta_info *sta, *prev_sta;
-- u8 link_id = status->link_id;
-+ int link_id = -1;
-
-- if (pubsta) {
-- rx.sta = container_of(pubsta, struct sta_info, sta);
-- rx.sdata = rx.sta->sdata;
-+ if (status->link_valid)
-+ link_id = status->link_id;
-
-- if (status->link_valid &&
-- !ieee80211_rx_is_valid_sta_link_id(pubsta, link_id))
-+ if (pubsta) {
-+ if (!ieee80211_rx_data_set_sta(&rx, pubsta, link_id))
- goto out;
-
-- if (status->link_valid)
-- rx.link_id = status->link_id;
--
- /*
- * In MLO connection, fetch the link_id using addr2
- * when the driver does not pass link_id in status.
-@@ -5066,7 +5045,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
- if (!link_sta)
- goto out;
-
-- rx.link_id = link_sta->link_id;
-+ ieee80211_rx_data_set_link(&rx, link_sta->link_id);
- }
-
- if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
-@@ -5082,30 +5061,27 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
- continue;
- }
-
-- if ((status->link_valid &&
-- !ieee80211_rx_is_valid_sta_link_id(&prev_sta->sta,
-- link_id)) ||
-- (!status->link_valid && prev_sta->sta.mlo))
-+ rx.sdata = prev_sta->sdata;
-+ if (!ieee80211_rx_data_set_sta(&rx, &prev_sta->sta,
-+ link_id))
-+ goto out;
-+
-+ if (!status->link_valid && prev_sta->sta.mlo)
- continue;
-
-- rx.link_id = status->link_valid ? link_id : -1;
-- rx.sta = prev_sta;
-- rx.sdata = prev_sta->sdata;
- ieee80211_prepare_and_rx_handle(&rx, skb, false);
-
- prev_sta = sta;
- }
-
- if (prev_sta) {
-- if ((status->link_valid &&
-- !ieee80211_rx_is_valid_sta_link_id(&prev_sta->sta,
-- link_id)) ||
-- (!status->link_valid && prev_sta->sta.mlo))
-+ rx.sdata = prev_sta->sdata;
-+ if (!ieee80211_rx_data_set_sta(&rx, &prev_sta->sta,
-+ link_id))
- goto out;
-
-- rx.link_id = status->link_valid ? link_id : -1;
-- rx.sta = prev_sta;
-- rx.sdata = prev_sta->sdata;
-+ if (!status->link_valid && prev_sta->sta.mlo)
-+ goto out;
-
- if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
- return;
---
-2.39.0
-
diff --git a/PKGBUILD b/PKGBUILD
index 56e23cc..2ebb43a 100644
--- a/PKGBUILD
+++ b/PKGBUILD
@@ -19,7 +19,7 @@ _custom=0
pkgbase=linux
_supver=6
_majver=1
-_minver=7
+_minver=10
_gccpatchver='20230105'
_gccpatchker='5.17+'
if [ "$_minver" == "0" ]; then
@@ -43,9 +43,6 @@ source=(
https://www.kernel.org/pub/linux/kernel/v${_supver}.x/${_srcname}.tar.{xz,sign}
config # the main kernel config file
0001-ZEN-Add-sysctl-and-CONFIG-to-disallow-unprivileged-C.patch
- 0002-Revert-drm-display-dp_mst-Move-all-payload-info-into.patch
- 0003-btrfs-fix-invalid-leaf-access-due-to-inline-extent-d.patch
- 0004-wifi-mac80211-fix-initialization-of-rx-link-and-rx-l.patch
kernel_compiler_patch-${_gccpatchver}.tar.gz::https://github.com/graysky2/kernel_compiler_patch/archive/${_gccpatchver}.tar.gz
ath9k-regdom-hack.patch
raid6-default-algo.patch
@@ -55,23 +52,17 @@ validpgpkeys=(
'647F28654894E3BD457199BE38DBBDC86092693E' # Greg Kroah-Hartman
)
# https://www.kernel.org/pub/linux/kernel/v5.x/sha256sums.asc
-sha256sums=('4ab048bad2e7380d3b827f1fad5ad2d2fc4f2e80e1c604d85d1f8781debe600f'
+sha256sums=('0be2919ba91cf5873a4cb4d429de78aad0469120d624e333a43b4b011d74d19d'
'SKIP'
'a77671535536c1b912f2824cf202e384470b983904fa3d1ab105cfd868872c3d'
'602a2fd8d11f86824889318ec33cdb20030cbd713d63514126e9c11dcb78ccf3'
- '5a9dccbf0edcc0e35c0469de43c17cf890cbd433c6befce68ed6f79c09b244db'
- 'd98df9708c63a756a2cf00d5bfd1a3644e20b9ddd4cca98fb28dec111a8746e4'
- '79d1ff9bc74d8e6d45c6200f3d9859cb169b0738befab283f376e17d6ae72b44'
'802946f623c69ae1a636b63697c23ca48af31a099415ed837d2c1e168a272d23'
'e9e0d289170b7fb598b572d9c892ae8d1420952034aa415e8b3334f20a58edcc'
'6ab863c8cfe6e0dd53a9d6455872fd4391508a8b97ab92e3f13558a6617b12a6')
-b2sums=('13c970a5780fd4ed97a0ff5d7c13e2f5cfebf608199ae94973f3a8a109bb961f1effb45bd61a687f5faaf784a84b4c88b5a4bb75b7a1a943f44cab9b6ad37ce1'
+b2sums=('926c499eb3260e4358b8112785e7be74062aca54b4d5c21d2729efc81329ae168c461d32f54061d8db05a12cac45b63ca97b74084a8af8138f547c3a2fc2d947'
'SKIP'
'04b86b565c344ded78538cd958465b3268f5f04ac8ca5f6b36b0ad54558102646a22526b5ec250e7a959fb698742b43a025df6c65cd2323f8514fe807fc6b20a'
'62b993e415186acc54f11c79f140f9f4df0c62658fa45e4895d131a3729810c181bfd7f13c6059ed882aa1d8ce79651acc30ab42077bae5497b4f0a1d05bf1aa'
- '11d46f3b7fdef0e1bb19959aa6414e9b59365be0f974077d80d3ac8cedea25f445e5549d0eb463042bf42f355937ae18d0d1ab78d386d65645cc3a2b657893b6'
- 'f751f7712a50fd7b22a9525921912904a5ec69facf29a967e987f591ec79e9e4d6ce23d3481940b20f1d5d9866c5d82c81655e3b51b9647efbe882a00a5c3172'
- 'f5298854e65f9e1c4e872b38300f408a47b5d1f0dec655cc2d2815247d4f15656c835145d6d5eeec448382df8faf006e9ad59eabf60e30f5b8ee66b44f86e1c9'
'd178dad69501967382d5c841f65e4f57651042bee8117041a9baa35ab3fa73af8174b8b999ae9e72ec381c52744ccaaabb77944d59f123c04b6ed5626432d843'
'b6ef77035611139fa9a6d5b8d30570e2781bb4da483bb569884b0bd0129b62e0b82a5a6776fefe43fee801c70d39de1ea4d4c177f7cedd5ac135e3c64f7b895a'
'e94aa35d92cec92f4b0d487e0569790f3b712b9eaa5107f14a4200578e398ca740bf369f30f070c8beb56a72d1a6d0fc06beb650d798a64f44abe5e3af327728')
@@ -94,9 +85,6 @@ prepare() {
# Hotfixes
echo "Applying hotfixes"
patch -p1 -i ../0001-ZEN-Add-sysctl-and-CONFIG-to-disallow-unprivileged-C.patch
- patch -p1 -i ../0002-Revert-drm-display-dp_mst-Move-all-payload-info-into.patch
- patch -p1 -i ../0003-btrfs-fix-invalid-leaf-access-due-to-inline-extent-d.patch
- patch -p1 -i ../0004-wifi-mac80211-fix-initialization-of-rx-link-and-rx-l.patch
# graysky gcc patch
echo "Applying graysky gcc patch"