From 4573add760b8dd52a215fd134effb76da10ebcf5 Mon Sep 17 00:00:00 2001 From: Rene Sapiens Date: Fri, 6 Feb 2026 16:25:56 -0800 Subject: [PATCH 1/6] thunderbolt: Read router NVM version before applying quirks The router NVM version is currently only available after the NVMem devices have been registered. This is too late for firmware-dependent quirks that are evaluated during tb_switch_add() before device registration. Split router NVM handling into two phases: - tb_switch_nvm_init() allocates the NVM object and reads the version - tb_switch_nvm_add() registers the NVMem devices using the pre-read NVM This makes the NVM major/minor version available before tb_check_quirks() without changing when the NVMem devices are registered. Signed-off-by: Rene Sapiens Signed-off-by: Mika Westerberg --- drivers/thunderbolt/switch.c | 31 +++++++++++++++++++++++++++---- 1 file changed, 27 insertions(+), 4 deletions(-) diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c index e5b48a331c58..c2ad58b19e7b 100644 --- a/drivers/thunderbolt/switch.c +++ b/drivers/thunderbolt/switch.c @@ -347,7 +347,7 @@ static int nvm_write(void *priv, unsigned int offset, void *val, size_t bytes) return ret; } -static int tb_switch_nvm_add(struct tb_switch *sw) +static int tb_switch_nvm_init(struct tb_switch *sw) { struct tb_nvm *nvm; int ret; @@ -365,6 +365,26 @@ static int tb_switch_nvm_add(struct tb_switch *sw) if (ret) goto err_nvm; + sw->nvm = nvm; + return 0; + +err_nvm: + tb_sw_dbg(sw, "NVM upgrade disabled\n"); + sw->no_nvm_upgrade = true; + if (!IS_ERR(nvm)) + tb_nvm_free(nvm); + + return ret; +} + +static int tb_switch_nvm_add(struct tb_switch *sw) +{ + struct tb_nvm *nvm = sw->nvm; + int ret; + + if (!nvm) + return 0; + /* * If the switch is in safe-mode the only accessible portion of * the NVM is the non-active one where userspace is expected to @@ -383,14 +403,13 @@ goto err_nvm; } - sw->nvm = nvm; return 0; err_nvm: 
tb_sw_dbg(sw, "NVM upgrade disabled\n"); sw->no_nvm_upgrade = true; - if (!IS_ERR(nvm)) - tb_nvm_free(nvm); + tb_nvm_free(nvm); + sw->nvm = NULL; return ret; } @@ -3311,6 +3330,10 @@ int tb_switch_add(struct tb_switch *sw) return ret; } + ret = tb_switch_nvm_init(sw); + if (ret) + return ret; + if (!sw->safe_mode) { tb_switch_credits_init(sw); From 59b03d12b1f6d14d936a3ebec225f8d914dc3b70 Mon Sep 17 00:00:00 2001 From: Rene Sapiens Date: Fri, 6 Feb 2026 16:25:57 -0800 Subject: [PATCH 2/6] thunderbolt: Disable CLx on Titan Ridge-based devices with old firmware Thunderbolt 3 devices based on Titan Ridge routers with NVM firmware version < 0x65 have been observed to become unstable when CL states are enabled. This can lead to link disconnect events and the device failing to enumerate. Enable CLx on Titan Ridge only when the running NVM firmware version is >= 0x65. Signed-off-by: Rene Sapiens Signed-off-by: Mika Westerberg --- drivers/thunderbolt/quirks.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/thunderbolt/quirks.c b/drivers/thunderbolt/quirks.c index e81de9c30eac..9f7914ac2f48 100644 --- a/drivers/thunderbolt/quirks.c +++ b/drivers/thunderbolt/quirks.c @@ -23,6 +23,9 @@ static void quirk_dp_credit_allocation(struct tb_switch *sw) static void quirk_clx_disable(struct tb_switch *sw) { + if (tb_switch_is_titan_ridge(sw) && sw->nvm && sw->nvm->major >= 0x65) + return; + sw->quirks |= QUIRK_NO_CLX; tb_sw_dbg(sw, "disabling CL states\n"); } @@ -61,6 +64,10 @@ static const struct tb_quirk tb_quirks[] = { /* Dell WD19TB supports self-authentication on unplug */ { 0x0000, 0x0000, 0x00d4, 0xb070, quirk_force_power_link }, { 0x0000, 0x0000, 0x00d4, 0xb071, quirk_force_power_link }, + + /* Intel Titan Ridge CLx is unstable on early firmware versions */ + { 0x8086, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE, 0x0000, 0x0000, + quirk_clx_disable }, /* * Intel Goshen Ridge NVM 27 and before report wrong number of * DP buffers. 
From f791145abcb83faa6ba580f2b7a6cefef37b9cf3 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Thu, 5 Mar 2026 09:18:37 -0800 Subject: [PATCH 3/6] MAINTAINERS: Remove bouncing maintainer, Mika takes over DMA test driver This maintainer's email is now bouncing. Since Mika maintains the core Thunderbolt/USB4 driver he can take over this one too. Signed-off-by: Dave Hansen [mw: Put me as maintainer instead of orphaning it] Signed-off-by: Mika Westerberg --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MAINTAINERS b/MAINTAINERS index 61bf550fd37c..1c5b16d80fdb 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -26300,7 +26300,7 @@ F: drivers/media/i2c/thp7312.c F: include/uapi/linux/thp7312.h THUNDERBOLT DMA TRAFFIC TEST DRIVER -M: Isaac Hazan +M: Mika Westerberg L: linux-usb@vger.kernel.org S: Maintained F: drivers/thunderbolt/dma_test.c From 500e54d449f60e9692e2622ad2ba4f1e79590e87 Mon Sep 17 00:00:00 2001 From: Rosen Penev Date: Fri, 13 Mar 2026 14:41:37 -0700 Subject: [PATCH 4/6] thunderbolt: dma_port: kmalloc_array + kzalloc to flex Use a single allocation with a flexible array member. Simplifies allocation and freeing. 
Signed-off-by: Rosen Penev Signed-off-by: Mika Westerberg --- drivers/thunderbolt/dma_port.c | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/drivers/thunderbolt/dma_port.c b/drivers/thunderbolt/dma_port.c index 334fefe21255..c7c2942fa7be 100644 --- a/drivers/thunderbolt/dma_port.c +++ b/drivers/thunderbolt/dma_port.c @@ -55,7 +55,7 @@ struct tb_dma_port { struct tb_switch *sw; u8 port; u32 base; - u8 *buf; + u8 buf[]; }; /* @@ -209,16 +209,10 @@ struct tb_dma_port *dma_port_alloc(struct tb_switch *sw) if (port < 0) return NULL; - dma = kzalloc_obj(*dma); + dma = kzalloc_flex(*dma, buf, MAIL_DATA_DWORDS * sizeof(u32)); if (!dma) return NULL; - dma->buf = kmalloc_array(MAIL_DATA_DWORDS, sizeof(u32), GFP_KERNEL); - if (!dma->buf) { - kfree(dma); - return NULL; - } - dma->sw = sw; dma->port = port; dma->base = DMA_PORT_CAP; @@ -232,10 +226,7 @@ struct tb_dma_port *dma_port_alloc(struct tb_switch *sw) */ void dma_port_free(struct tb_dma_port *dma) { - if (dma) { - kfree(dma->buf); - kfree(dma); - } + kfree(dma); } static int dma_port_wait_for_completion(struct tb_dma_port *dma, From c3e7cc8bc5ca08b2fae3d43c7c86f140daa873ef Mon Sep 17 00:00:00 2001 From: Rosen Penev Date: Wed, 18 Mar 2026 11:52:37 -0700 Subject: [PATCH 5/6] thunderbolt: Use kzalloc_flex() for struct tb_path allocation Simplifies allocation of struct tb_path by using a flexible array member. Also added __counted_by for extra runtime analysis. 
Signed-off-by: Rosen Penev Reviewed-by: Kees Cook Signed-off-by: Mika Westerberg --- drivers/thunderbolt/path.c | 28 +++++++--------------------- drivers/thunderbolt/tb.h | 5 +++-- 2 files changed, 10 insertions(+), 23 deletions(-) diff --git a/drivers/thunderbolt/path.c b/drivers/thunderbolt/path.c index 22fb4a1e1acd..8713ea0f47c1 100644 --- a/drivers/thunderbolt/path.c +++ b/drivers/thunderbolt/path.c @@ -150,22 +150,17 @@ struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid, num_hops++; } - path = kzalloc_obj(*path); + path = kzalloc_flex(*path, hops, num_hops); if (!path) return NULL; + path->path_length = num_hops; + path->name = name; path->tb = src->sw->tb; - path->path_length = num_hops; path->activated = true; path->alloc_hopid = alloc_hopid; - path->hops = kzalloc_objs(*path->hops, num_hops); - if (!path->hops) { - kfree(path); - return NULL; - } - tb_dbg(path->tb, "discovering %s path starting from %llx:%u\n", path->name, tb_route(src->sw), src->port); @@ -245,10 +240,6 @@ struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid, size_t num_hops; int i, ret; - path = kzalloc_obj(*path); - if (!path) - return NULL; - first_port = last_port = NULL; i = 0; tb_for_each_port_on_path(src, dst, in_port) { @@ -259,20 +250,17 @@ struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid, } /* Check that src and dst are reachable */ - if (first_port != src || last_port != dst) { - kfree(path); + if (first_port != src || last_port != dst) return NULL; - } /* Each hop takes two ports */ num_hops = i / 2; - path->hops = kzalloc_objs(*path->hops, num_hops); - if (!path->hops) { - kfree(path); + path = kzalloc_flex(*path, hops, num_hops); + if (!path) return NULL; - } + path->path_length = num_hops; path->alloc_hopid = true; in_hopid = src_hopid; @@ -339,7 +327,6 @@ struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid, } path->tb = tb; - path->path_length = num_hops; path->name = 
name; return path; @@ -372,7 +359,6 @@ void tb_path_free(struct tb_path *path) } } - kfree(path->hops); kfree(path); } diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h index e96474f17067..217c3114bec8 100644 --- a/drivers/thunderbolt/tb.h +++ b/drivers/thunderbolt/tb.h @@ -419,9 +419,9 @@ enum tb_path_port { * @activated: Is the path active * @clear_fc: Clear all flow control from the path config space entries * when deactivating this path - * @hops: Path hops * @path_length: How many hops the path uses * @alloc_hopid: Does this path consume port HopID + * @hops: Path hops * * A path consists of a number of hops (see &struct tb_path_hop). To * establish a PCIe tunnel two paths have to be created between the two @@ -440,9 +440,10 @@ struct tb_path { bool drop_packages; bool activated; bool clear_fc; - struct tb_path_hop *hops; int path_length; bool alloc_hopid; + + struct tb_path_hop hops[] __counted_by(path_length); }; /* HopIDs 0-7 are reserved by the Thunderbolt protocol */ From 498c05821bb42f70e9bf6512c3dec4aa821815d0 Mon Sep 17 00:00:00 2001 From: Rosen Penev Date: Wed, 1 Apr 2026 14:47:26 -0700 Subject: [PATCH 6/6] thunderbolt: tunnel: Simplify allocation Use a flexible array member and kzalloc_flex to combine allocations. Add __counted_by for extra runtime analysis. Move counting variable assignment after allocation. kzalloc_flex with GCC >= 15 does this automatically. 
Signed-off-by: Rosen Penev Signed-off-by: Mika Westerberg --- drivers/thunderbolt/tunnel.c | 10 ++-------- drivers/thunderbolt/tunnel.h | 5 +++-- 2 files changed, 5 insertions(+), 10 deletions(-) diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c index 89676acf1290..f38f7753b6e4 100644 --- a/drivers/thunderbolt/tunnel.c +++ b/drivers/thunderbolt/tunnel.c @@ -180,19 +180,14 @@ static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths, { struct tb_tunnel *tunnel; - tunnel = kzalloc_obj(*tunnel); + tunnel = kzalloc_flex(*tunnel, paths, npaths); if (!tunnel) return NULL; - tunnel->paths = kzalloc_objs(tunnel->paths[0], npaths); - if (!tunnel->paths) { - kfree(tunnel); - return NULL; - } + tunnel->npaths = npaths; INIT_LIST_HEAD(&tunnel->list); tunnel->tb = tb; - tunnel->npaths = npaths; tunnel->type = type; kref_init(&tunnel->kref); @@ -219,7 +214,6 @@ static void tb_tunnel_destroy(struct kref *kref) tb_path_free(tunnel->paths[i]); } - kfree(tunnel->paths); kfree(tunnel); } diff --git a/drivers/thunderbolt/tunnel.h b/drivers/thunderbolt/tunnel.h index 2c44fc8a10bc..4878763a82b3 100644 --- a/drivers/thunderbolt/tunnel.h +++ b/drivers/thunderbolt/tunnel.h @@ -37,7 +37,6 @@ enum tb_tunnel_state { * @src_port: Source port of the tunnel * @dst_port: Destination port of the tunnel. For discovered incomplete * tunnels may be %NULL or null adapter port instead. - * @paths: All paths required by the tunnel * @npaths: Number of paths in @paths * @pre_activate: Optional tunnel specific initialization called before * activation. Can touch hardware. 
@@ -69,13 +68,13 @@ enum tb_tunnel_state { * @dprx_work: Worker that is scheduled to poll completion of DPRX capabilities read * @callback: Optional callback called when DP tunnel is fully activated * @callback_data: Optional data for @callback + * @paths: All paths required by the tunnel */ struct tb_tunnel { struct kref kref; struct tb *tb; struct tb_port *src_port; struct tb_port *dst_port; - struct tb_path **paths; size_t npaths; int (*pre_activate)(struct tb_tunnel *tunnel); int (*activate)(struct tb_tunnel *tunnel, bool activate); @@ -107,6 +106,8 @@ struct tb_tunnel { struct delayed_work dprx_work; void (*callback)(struct tb_tunnel *tunnel, void *data); void *callback_data; + + struct tb_path *paths[] __counted_by(npaths); }; struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,