diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 6e108086fb76..d2256fa95685 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2059,12 +2059,13 @@ static void nvme_set_ctrl_limits(struct nvme_ctrl *ctrl,
 }
 
 static bool nvme_update_disk_info(struct nvme_ns *ns, struct nvme_id_ns *id,
-		struct queue_limits *lim)
+		struct nvme_id_ns_nvm *nvm, struct queue_limits *lim)
 {
 	struct nvme_ns_head *head = ns->head;
 	struct nvme_ctrl *ctrl = ns->ctrl;
 	u32 bs = 1U << head->lba_shift;
 	u32 atomic_bs, phys_bs, io_opt = 0;
+	u32 npdg = 1, npda = 1;
 	bool valid = true;
 	u8 optperf;
 
@@ -2117,7 +2118,35 @@ static bool nvme_update_disk_info(struct nvme_ns *ns, struct nvme_id_ns *id,
 	else
 		lim->max_hw_discard_sectors = 0;
 
-	lim->discard_granularity = lim->logical_block_size;
+	/*
+	 * NVMe namespaces advertise both a preferred deallocate granularity
+	 * (for a discard length) and alignment (for a discard starting offset).
+	 * However, Linux block devices advertise a single discard_granularity.
+	 * From NVM Command Set specification 1.1 section 5.2.2, the NPDGL/NPDAL
+	 * fields in the NVM Command Set Specific Identify Namespace structure
+	 * are preferred to NPDG/NPDA in the Identify Namespace structure since
+	 * they can represent larger values. However, NPDGL or NPDAL may be 0 if
+	 * unsupported. NPDG and NPDA are 0's based.
+	 * From Figure 115 of NVM Command Set specification 1.1, NPDGL and NPDAL
+	 * are supported if the high bit of OPTPERF is set. NPDG is supported if
+	 * the low bit of OPTPERF is set. NPDA is supported if either is set.
+	 * NPDG should be a multiple of NPDA, and likewise NPDGL should be a
+	 * multiple of NPDAL, but the spec doesn't say anything about NPDG vs.
+	 * NPDAL or NPDGL vs. NPDA. So compute the maximum instead of assuming
+	 * NPDG(L) is the larger. If neither NPDG, NPDGL, NPDA, nor NPDAL are
+	 * supported, default the discard_granularity to the logical block size.
+	 */
+	if (optperf & 0x2 && nvm && nvm->npdgl)
+		npdg = le32_to_cpu(nvm->npdgl);
+	else if (optperf & 0x1)
+		npdg = from0based(id->npdg);
+	if (optperf & 0x2 && nvm && nvm->npdal)
+		npda = le32_to_cpu(nvm->npdal);
+	else if (optperf)
+		npda = from0based(id->npda);
+	if (check_mul_overflow(max(npdg, npda), lim->logical_block_size,
+			       &lim->discard_granularity))
+		lim->discard_granularity = lim->logical_block_size;
 
 	if (ctrl->dmrl)
 		lim->max_discard_segments = ctrl->dmrl;
@@ -2384,7 +2413,7 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
 	nvme_set_ctrl_limits(ns->ctrl, &lim, false);
 	nvme_configure_metadata(ns->ctrl, ns->head, id, nvm, info);
 	nvme_set_chunk_sectors(ns, id, &lim);
-	if (!nvme_update_disk_info(ns, id, &lim))
+	if (!nvme_update_disk_info(ns, id, nvm, &lim))
 		capacity = 0;
 
 	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&