Merge branch '7.1/scsi-queue' into 7.1/scsi-fixes

Pull in remaining commits from 7.1/scsi-queue.

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
This commit is contained in:
Martin K. Petersen 2026-04-26 21:15:04 -04:00
commit 98f69975d4
10 changed files with 70 additions and 12 deletions

View File

@@ -37,7 +37,7 @@
#define TPGS_MODE_EXPLICIT 0x2 #define TPGS_MODE_EXPLICIT 0x2
#define ALUA_RTPG_SIZE 128 #define ALUA_RTPG_SIZE 128
#define ALUA_FAILOVER_TIMEOUT 60 #define ALUA_FAILOVER_TIMEOUT 255 /* max 255 (8-bit value) */
#define ALUA_FAILOVER_RETRIES 5 #define ALUA_FAILOVER_RETRIES 5
#define ALUA_RTPG_DELAY_MSECS 5 #define ALUA_RTPG_DELAY_MSECS 5
#define ALUA_RTPG_RETRY_DELAY 2 #define ALUA_RTPG_RETRY_DELAY 2

View File

@@ -1491,7 +1491,7 @@ static void prep_ata_v3_hw(struct hisi_hba *hisi_hba,
phy_id = device->phy->identify.phy_identifier; phy_id = device->phy->identify.phy_identifier;
hdr->dw0 |= cpu_to_le32((1U << phy_id) hdr->dw0 |= cpu_to_le32((1U << phy_id)
<< CMD_HDR_PHY_ID_OFF); << CMD_HDR_PHY_ID_OFF);
hdr->dw0 |= CMD_HDR_FORCE_PHY_MSK; hdr->dw0 |= cpu_to_le32(CMD_HDR_FORCE_PHY_MSK);
hdr->dw0 |= cpu_to_le32(4U << CMD_HDR_CMD_OFF); hdr->dw0 |= cpu_to_le32(4U << CMD_HDR_CMD_OFF);
} }

View File

@@ -2738,8 +2738,20 @@ scsih_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
pcie_device->enclosure_level, pcie_device->enclosure_level,
pcie_device->connector_name); pcie_device->connector_name);
/*
* The HBA firmware passes the NVMe drive's MDTS
* (Maximum Data Transfer Size) up to the driver. However,
* the driver hardcodes a 4K buffer size for the PRP list,
* accommodating at most 512 entries. This strictly limits
* the maximum supported NVMe I/O transfer to 2 MiB.
*
* Cap max_hw_sectors to the smaller of the drive's reported
* MDTS or the 2 MiB driver limit to prevent kernel oopses.
*/
lim->max_hw_sectors = SZ_2M >> SECTOR_SHIFT;
if (pcie_device->nvme_mdts) if (pcie_device->nvme_mdts)
lim->max_hw_sectors = pcie_device->nvme_mdts / 512; lim->max_hw_sectors = min(lim->max_hw_sectors,
pcie_device->nvme_mdts >> SECTOR_SHIFT);
pcie_device_put(pcie_device); pcie_device_put(pcie_device);
spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

View File

@@ -657,7 +657,7 @@ struct pmcraid_hostrcb {
*/ */
struct pmcraid_instance { struct pmcraid_instance {
/* Array of allowed-to-be-exposed resources, initialized from /* Array of allowed-to-be-exposed resources, initialized from
* Configutation Table, later updated with CCNs * Configuration Table, later updated with CCNs
*/ */
struct pmcraid_resource_entry *res_entries; struct pmcraid_resource_entry *res_entries;

View File

@@ -1801,7 +1801,7 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
} }
res = blk_rq_map_user_io(rq, md, hp->dxferp, hp->dxfer_len, res = blk_rq_map_user_io(rq, md, hp->dxferp, hp->dxfer_len,
GFP_ATOMIC, iov_count, iov_count, 1, rw); GFP_KERNEL, iov_count, iov_count, 1, rw);
if (!res) { if (!res) {
srp->bio = rq->bio; srp->bio = rq->bio;

View File

@@ -9427,6 +9427,7 @@ static void pqi_shutdown(struct pci_dev *pci_dev)
pqi_crash_if_pending_command(ctrl_info); pqi_crash_if_pending_command(ctrl_info);
pqi_reset(ctrl_info); pqi_reset(ctrl_info);
pqi_ctrl_unblock_device_reset(ctrl_info);
} }
static void pqi_process_lockup_action_param(void) static void pqi_process_lockup_action_param(void)

View File

@@ -995,6 +995,7 @@ int iscsit_setup_scsi_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
int data_direction, payload_length; int data_direction, payload_length;
struct iscsi_ecdb_ahdr *ecdb_ahdr; struct iscsi_ecdb_ahdr *ecdb_ahdr;
struct iscsi_scsi_req *hdr; struct iscsi_scsi_req *hdr;
u16 ahslength, cdb_length;
int iscsi_task_attr; int iscsi_task_attr;
unsigned char *cdb; unsigned char *cdb;
int sam_task_attr; int sam_task_attr;
@@ -1108,14 +1109,27 @@ int iscsit_setup_scsi_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
ISCSI_REASON_CMD_NOT_SUPPORTED, buf); ISCSI_REASON_CMD_NOT_SUPPORTED, buf);
} }
cdb = kmalloc(be16_to_cpu(ecdb_ahdr->ahslength) + 15, ahslength = be16_to_cpu(ecdb_ahdr->ahslength);
GFP_KERNEL); if (!ahslength) {
pr_err("Extended CDB AHS with zero length, protocol error.\n");
return iscsit_add_reject_cmd(cmd,
ISCSI_REASON_PROTOCOL_ERROR, buf);
}
if (ahslength > (hdr->hlength * 4) - 3) {
pr_err("Extended CDB AHS length %u exceeds available PDU buffer.\n",
ahslength);
return iscsit_add_reject_cmd(cmd,
ISCSI_REASON_PROTOCOL_ERROR, buf);
}
cdb_length = ahslength - 1 + ISCSI_CDB_SIZE;
cdb = kmalloc(cdb_length, GFP_KERNEL);
if (cdb == NULL) if (cdb == NULL)
return iscsit_add_reject_cmd(cmd, return iscsit_add_reject_cmd(cmd,
ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf); ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
memcpy(cdb, hdr->cdb, ISCSI_CDB_SIZE); memcpy(cdb, hdr->cdb, ISCSI_CDB_SIZE);
memcpy(cdb + ISCSI_CDB_SIZE, ecdb_ahdr->ecdb, memcpy(cdb + ISCSI_CDB_SIZE, ecdb_ahdr->ecdb, cdb_length - ISCSI_CDB_SIZE);
be16_to_cpu(ecdb_ahdr->ahslength) - 1);
} }
data_direction = (hdr->flags & ISCSI_FLAG_CMD_WRITE) ? DMA_TO_DEVICE : data_direction = (hdr->flags & ISCSI_FLAG_CMD_WRITE) ? DMA_TO_DEVICE :

View File

@@ -3249,7 +3249,7 @@ static ssize_t target_tg_pt_gp_members_show(struct config_item *item,
config_item_name(&lun->lun_group.cg_item)); config_item_name(&lun->lun_group.cg_item));
cur_len++; /* Extra byte for NULL terminator */ cur_len++; /* Extra byte for NULL terminator */
if ((cur_len + len) > PAGE_SIZE) { if (cur_len > TG_PT_GROUP_NAME_BUF || (cur_len + len) > PAGE_SIZE) {
pr_warn("Ran out of lu_gp_show_attr" pr_warn("Ran out of lu_gp_show_attr"
"_members buffer\n"); "_members buffer\n");
break; break;

View File

@@ -9259,6 +9259,30 @@ static void ufshcd_config_mcq(struct ufs_hba *hba)
hba->nutrs); hba->nutrs);
} }
/**
 * ufshcd_get_op_mode - get UFS operating mode.
 * @hba: per-adapter instance
 *
 * Use the PA_PWRMODE value to represent the operating mode of UFS.
 *
 * Return: LS_MODE when both the RX and TX lanes are in a slow power mode
 * (SLOW_MODE or SLOWAUTO_MODE); HS_MODE otherwise, including when the
 * DME attribute read fails.
 */
static enum ufs_op_mode ufshcd_get_op_mode(struct ufs_hba *hba)
{
	u32 mode = 0;
	u8 rx_mode;
	u8 tx_mode;

	/*
	 * If the DME read fails, "mode" would otherwise be read
	 * uninitialized (undefined behavior). Check the result and
	 * conservatively report HS_MODE on failure.
	 * NOTE(review): assumes ufshcd_dme_get() returns 0 on success,
	 * per the usual kernel convention — confirm against ufshcd.h.
	 */
	if (ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode))
		return HS_MODE;

	rx_mode = (mode >> PWRMODE_RX_OFFSET) & PWRMODE_MASK;
	tx_mode = mode & PWRMODE_MASK;

	if ((rx_mode == SLOW_MODE || rx_mode == SLOWAUTO_MODE) &&
	    (tx_mode == SLOW_MODE || tx_mode == SLOWAUTO_MODE))
		return LS_MODE;

	return HS_MODE;
}
static int ufshcd_post_device_init(struct ufs_hba *hba) static int ufshcd_post_device_init(struct ufs_hba *hba)
{ {
int ret; int ret;
@@ -9281,11 +9305,13 @@ static int ufshcd_post_device_init(struct ufs_hba *hba)
return 0; return 0;
/* /*
* Set the right value to bRefClkFreq before attempting to * Set the right value to bRefClkFreq in LS_MODE before attempting to
* switch to HS gears. * switch to HS gears.
*/ */
if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL) if (ufshcd_get_op_mode(hba) == LS_MODE &&
hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL)
ufshcd_set_dev_ref_clk(hba); ufshcd_set_dev_ref_clk(hba);
/* Gear up to HS gear. */ /* Gear up to HS gear. */
ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info, ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info,
UFSHCD_PMC_POLICY_DONT_FORCE); UFSHCD_PMC_POLICY_DONT_FORCE);

View File

@@ -333,6 +333,11 @@ enum ufs_eom_eye_mask {
#define DME_LocalTC0ReplayTimeOutVal 0xD042 #define DME_LocalTC0ReplayTimeOutVal 0xD042
#define DME_LocalAFC0ReqTimeOutVal 0xD043 #define DME_LocalAFC0ReqTimeOutVal 0xD043
/* UFS link operating mode, derived from the PA_PWRMODE DME attribute */
enum ufs_op_mode {
	LS_MODE = 1,	/* low speed: both RX and TX in SLOW/SLOWAUTO */
	HS_MODE = 2,	/* high speed: any other lane power-mode combination */
};
/* PA power modes */ /* PA power modes */
enum ufs_pa_pwr_mode { enum ufs_pa_pwr_mode {
FAST_MODE = 1, FAST_MODE = 1,