
Commit 381af8d

Merge tag 'block-6.14-20250306' of git://git.kernel.dk/linux
Pull block fixes from Jens Axboe:

 - NVMe pull request via Keith:
     - TCP use after free fix on polling (Sagi)
     - Controller memory buffer cleanup fixes (Icenowy)
     - Free leaking requests on bad user passthrough commands (Keith)
     - TCP error message fix (Maurizio)
     - TCP corruption fix on partial PDU (Maurizio)
     - TCP memory ordering fix for weakly ordered archs (Meir)
     - Type coercion fix on message error for TCP (Dan)

 - Name the RQF flags enum, fixing issues with anon enums and BPF import
   of it

 - ublk parameter setting fix

 - GPT partition 7-bit conversion fix

* tag 'block-6.14-20250306' of git://git.kernel.dk/linux:
  block: Name the RQF flags enum
  nvme-tcp: fix signedness bug in nvme_tcp_init_connection()
  block: fix conversion of GPT partition name to 7-bit
  ublk: set_params: properly check if parameters can be applied
  nvmet-tcp: Fix a possible sporadic response drops in weakly ordered arch
  nvme-tcp: fix potential memory corruption in nvme_tcp_recv_pdu()
  nvme-tcp: Fix a C2HTermReq error message
  nvmet: remove old function prototype
  nvme-ioctl: fix leaked requests on mapping error
  nvme-pci: skip CMB blocks incompatible with PCI P2P DMA
  nvme-pci: clean up CMBMSC when registering CMB fails
  nvme-tcp: fix possible UAF in nvme_tcp_poll
2 parents d53276d + e711252 commit 381af8d

8 files changed, +75 -30 lines

block/partitions/efi.c

+1 -1

@@ -682,7 +682,7 @@ static void utf16_le_to_7bit(const __le16 *in, unsigned int size, u8 *out)
         out[size] = 0;
 
         while (i < size) {
-                u8 c = le16_to_cpu(in[i]) & 0xff;
+                u8 c = le16_to_cpu(in[i]) & 0x7f;
 
                 if (c && !isprint(c))
                         c = '!';
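The key change is the mask: GPT partition names are UTF-16LE, and the output buffer is meant to hold 7-bit ASCII. A small, hypothetical user-space demonstration of the arithmetic (not kernel code):

/*
 * A UTF-16LE code unit such as U+00E9 keeps its high bit under a 0xff
 * mask, so the result is no longer 7-bit ASCII; 0x7f forces every
 * output byte into the 0..127 range.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t cu = 0x00e9;   /* 'é' as a UTF-16 code unit */

        printf("& 0xff -> 0x%02x\n", (unsigned int)(cu & 0xff));   /* 0xe9: high bit set */
        printf("& 0x7f -> 0x%02x\n", (unsigned int)(cu & 0x7f));   /* 0x69: fits in 7 bits */
        return 0;
}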

drivers/block/ublk_drv.c

+5 -2

@@ -2715,9 +2715,12 @@ static int ublk_ctrl_set_params(struct ublk_device *ub,
         if (ph.len > sizeof(struct ublk_params))
                 ph.len = sizeof(struct ublk_params);
 
-        /* parameters can only be changed when device isn't live */
         mutex_lock(&ub->mutex);
-        if (ub->dev_info.state == UBLK_S_DEV_LIVE) {
+        if (test_bit(UB_STATE_USED, &ub->state)) {
+                /*
+                 * Parameters can only be changed when device hasn't
+                 * been started yet
+                 */
                 ret = -EACCES;
         } else if (copy_from_user(&ub->params, argp, ph.len)) {
                 ret = -EFAULT;

drivers/nvme/host/ioctl.c

+8 -4

@@ -128,8 +128,10 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
         if (!nvme_ctrl_sgl_supported(ctrl))
                 dev_warn_once(ctrl->device, "using unchecked data buffer\n");
         if (has_metadata) {
-                if (!supports_metadata)
-                        return -EINVAL;
+                if (!supports_metadata) {
+                        ret = -EINVAL;
+                        goto out;
+                }
                 if (!nvme_ctrl_meta_sgl_supported(ctrl))
                         dev_warn_once(ctrl->device,
                                       "using unchecked metadata buffer\n");
@@ -139,8 +141,10 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
                 struct iov_iter iter;
 
                 /* fixedbufs is only for non-vectored io */
-                if (WARN_ON_ONCE(flags & NVME_IOCTL_VEC))
-                        return -EINVAL;
+                if (WARN_ON_ONCE(flags & NVME_IOCTL_VEC)) {
+                        ret = -EINVAL;
+                        goto out;
+                }
                 ret = io_uring_cmd_import_fixed(ubuffer, bufflen,
                                 rq_data_dir(req), &iter, ioucmd);
                 if (ret < 0)
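The fix turns early returns into jumps to the existing cleanup label so the already-mapped request gets released. A generic sketch of that goto-based unwind pattern, with made-up names rather than the driver's actual helpers:

/* Hypothetical illustration of goto-based error unwinding; all names invented. */
#include <errno.h>
#include <stdlib.h>

struct thing { int id; };

static struct thing *thing_alloc(void) { return calloc(1, sizeof(struct thing)); }
static void thing_free(struct thing *t) { free(t); }
static int thing_validate(int flags) { return flags ? -EINVAL : 0; }

static int thing_setup(struct thing **out_thing, int flags)
{
        struct thing *t = thing_alloc();
        int ret;

        if (!t)
                return -ENOMEM;

        ret = thing_validate(flags);
        if (ret)
                goto out;       /* a bare "return ret" here would leak 't' */

        *out_thing = t;         /* success: caller now owns 't' */
        return 0;
out:
        thing_free(t);          /* error: undo what was acquired above */
        return ret;
}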

drivers/nvme/host/pci.c

+13 -8

@@ -1982,6 +1982,18 @@ static void nvme_map_cmb(struct nvme_dev *dev)
         if (offset > bar_size)
                 return;
 
+        /*
+         * Controllers may support a CMB size larger than their BAR, for
+         * example, due to being behind a bridge. Reduce the CMB to the
+         * reported size of the BAR
+         */
+        size = min(size, bar_size - offset);
+
+        if (!IS_ALIGNED(size, memremap_compat_align()) ||
+            !IS_ALIGNED(pci_resource_start(pdev, bar),
+                        memremap_compat_align()))
+                return;
+
         /*
          * Tell the controller about the host side address mapping the CMB,
          * and enable CMB decoding for the NVMe 1.4+ scheme:
@@ -1992,17 +2004,10 @@ static void nvme_map_cmb(struct nvme_dev *dev)
                           dev->bar + NVME_REG_CMBMSC);
         }
 
-        /*
-         * Controllers may support a CMB size larger than their BAR,
-         * for example, due to being behind a bridge. Reduce the CMB to
-         * the reported size of the BAR
-         */
-        if (size > bar_size - offset)
-                size = bar_size - offset;
-
         if (pci_p2pdma_add_resource(pdev, bar, size, offset)) {
                 dev_warn(dev->ctrl.device,
                          "failed to register the CMB\n");
+                hi_lo_writeq(0, dev->bar + NVME_REG_CMBMSC);
                 return;
         }
 
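Two conditions now gate the P2P DMA registration: the CMB size is clamped to the BAR, and both the size and the BAR start must satisfy the platform's memremap alignment. A rough user-space approximation of the alignment test; the kernel's IS_ALIGNED() behaves like this for power-of-two alignments, and the 2 MiB value below is only an example stand-in for what memremap_compat_align() returns:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same idea as the kernel's IS_ALIGNED() for a power-of-two alignment. */
#define IS_ALIGNED(x, a)        ((((uint64_t)(x)) & ((uint64_t)(a) - 1)) == 0)

static bool cmb_usable(uint64_t bar_start, uint64_t size, uint64_t align)
{
        return IS_ALIGNED(size, align) && IS_ALIGNED(bar_start, align);
}

int main(void)
{
        uint64_t align = 2 * 1024 * 1024;       /* example alignment only */

        printf("%d\n", cmb_usable(0xfe000000, 64 * 1024 * 1024, align)); /* 1 */
        printf("%d\n", cmb_usable(0xfe000000, 33 * 1024, align));        /* 0 */
        return 0;
}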

drivers/nvme/host/tcp.c

+36 -9

@@ -217,6 +217,19 @@ static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
         return queue - queue->ctrl->queues;
 }
 
+static inline bool nvme_tcp_recv_pdu_supported(enum nvme_tcp_pdu_type type)
+{
+        switch (type) {
+        case nvme_tcp_c2h_term:
+        case nvme_tcp_c2h_data:
+        case nvme_tcp_r2t:
+        case nvme_tcp_rsp:
+                return true;
+        default:
+                return false;
+        }
+}
+
 /*
  * Check if the queue is TLS encrypted
  */
@@ -775,7 +788,7 @@ static void nvme_tcp_handle_c2h_term(struct nvme_tcp_queue *queue,
                 [NVME_TCP_FES_PDU_SEQ_ERR] = "PDU Sequence Error",
                 [NVME_TCP_FES_HDR_DIGEST_ERR] = "Header Digest Error",
                 [NVME_TCP_FES_DATA_OUT_OF_RANGE] = "Data Transfer Out Of Range",
-                [NVME_TCP_FES_R2T_LIMIT_EXCEEDED] = "R2T Limit Exceeded",
+                [NVME_TCP_FES_DATA_LIMIT_EXCEEDED] = "Data Transfer Limit Exceeded",
                 [NVME_TCP_FES_UNSUPPORTED_PARAM] = "Unsupported Parameter",
         };
 
@@ -818,6 +831,16 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
                 return 0;
 
         hdr = queue->pdu;
+        if (unlikely(hdr->hlen != sizeof(struct nvme_tcp_rsp_pdu))) {
+                if (!nvme_tcp_recv_pdu_supported(hdr->type))
+                        goto unsupported_pdu;
+
+                dev_err(queue->ctrl->ctrl.device,
+                        "pdu type %d has unexpected header length (%d)\n",
+                        hdr->type, hdr->hlen);
+                return -EPROTO;
+        }
+
         if (unlikely(hdr->type == nvme_tcp_c2h_term)) {
                 /*
                  * C2HTermReq never includes Header or Data digests.
@@ -850,10 +873,13 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
                 nvme_tcp_init_recv_ctx(queue);
                 return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
         default:
-                dev_err(queue->ctrl->ctrl.device,
-                        "unsupported pdu type (%d)\n", hdr->type);
-                return -EINVAL;
+                goto unsupported_pdu;
         }
+
+unsupported_pdu:
+        dev_err(queue->ctrl->ctrl.device,
+                "unsupported pdu type (%d)\n", hdr->type);
+        return -EINVAL;
 }
 
 static inline void nvme_tcp_end_request(struct request *rq, u16 status)
@@ -1495,11 +1521,11 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
         msg.msg_flags = MSG_WAITALL;
         ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
                         iov.iov_len, msg.msg_flags);
-        if (ret < sizeof(*icresp)) {
+        if (ret >= 0 && ret < sizeof(*icresp))
+                ret = -ECONNRESET;
+        if (ret < 0) {
                 pr_warn("queue %d: failed to receive icresp, error %d\n",
                         nvme_tcp_queue_id(queue), ret);
-                if (ret >= 0)
-                        ret = -ECONNRESET;
                 goto free_icresp;
         }
         ret = -ENOTCONN;
@@ -2699,16 +2725,17 @@ static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
 {
         struct nvme_tcp_queue *queue = hctx->driver_data;
         struct sock *sk = queue->sock->sk;
+        int ret;
 
         if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
                 return 0;
 
         set_bit(NVME_TCP_Q_POLLING, &queue->flags);
         if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
                 sk_busy_loop(sk, true);
-        nvme_tcp_try_recv(queue);
+        ret = nvme_tcp_try_recv(queue);
         clear_bit(NVME_TCP_Q_POLLING, &queue->flags);
-        return queue->nr_cqe;
+        return ret < 0 ? ret : queue->nr_cqe;
 }
 
 static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
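The icresp hunk is the signedness fix: kernel_recvmsg() returns an int, but comparing it directly against sizeof(*icresp) promotes it to size_t, so a negative error return looks like a huge length and sails past the check. A minimal user-space illustration of the promotion; the 128-byte length is just a stand-in for sizeof(*icresp):

#include <stddef.h>
#include <stdio.h>

int main(void)
{
        int ret = -104;         /* pretend the receive path returned -ECONNRESET */
        size_t need = 128;      /* stand-in for sizeof(*icresp) */

        if (ret < (int)need)
                puts("signed compare: short/failed receive caught");

        if (ret < need)         /* ret is promoted to size_t here */
                puts("unsigned compare: caught");
        else
                puts("unsigned compare: -104 became a huge value and slipped through");
        return 0;
}

The fixed code sidesteps the promotion by handling "ret >= 0 && ret < sizeof(*icresp)" and "ret < 0" as separate cases, so genuine errors always reach the warning and the free_icresp path.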

drivers/nvme/target/nvmet.h

-1

@@ -647,7 +647,6 @@ void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
                 struct nvmet_host *host);
 void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
                 u8 event_info, u8 log_page);
-bool nvmet_subsys_nsid_exists(struct nvmet_subsys *subsys, u32 nsid);
 
 #define NVMET_MIN_QUEUE_SIZE 16
 #define NVMET_MAX_QUEUE_SIZE 1024

drivers/nvme/target/tcp.c

+11 -4

@@ -571,10 +571,16 @@ static void nvmet_tcp_queue_response(struct nvmet_req *req)
         struct nvmet_tcp_cmd *cmd =
                 container_of(req, struct nvmet_tcp_cmd, req);
         struct nvmet_tcp_queue *queue = cmd->queue;
+        enum nvmet_tcp_recv_state queue_state;
+        struct nvmet_tcp_cmd *queue_cmd;
         struct nvme_sgl_desc *sgl;
         u32 len;
 
-        if (unlikely(cmd == queue->cmd)) {
+        /* Pairs with store_release in nvmet_prepare_receive_pdu() */
+        queue_state = smp_load_acquire(&queue->rcv_state);
+        queue_cmd = READ_ONCE(queue->cmd);
+
+        if (unlikely(cmd == queue_cmd)) {
                 sgl = &cmd->req.cmd->common.dptr.sgl;
                 len = le32_to_cpu(sgl->length);
 
@@ -583,7 +589,7 @@ static void nvmet_tcp_queue_response(struct nvmet_req *req)
                  * Avoid using helpers, this might happen before
                  * nvmet_req_init is completed.
                  */
-                if (queue->rcv_state == NVMET_TCP_RECV_PDU &&
+                if (queue_state == NVMET_TCP_RECV_PDU &&
                     len && len <= cmd->req.port->inline_data_size &&
                     nvme_is_write(cmd->req.cmd))
                         return;
@@ -847,8 +853,9 @@ static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue)
 {
         queue->offset = 0;
         queue->left = sizeof(struct nvme_tcp_hdr);
-        queue->cmd = NULL;
-        queue->rcv_state = NVMET_TCP_RECV_PDU;
+        WRITE_ONCE(queue->cmd, NULL);
+        /* Ensure rcv_state is visible only after queue->cmd is set */
+        smp_store_release(&queue->rcv_state, NVMET_TCP_RECV_PDU);
 }
 
 static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue)
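The ordering fix pairs smp_store_release() in nvmet_prepare_receive_pdu() with smp_load_acquire() in nvmet_tcp_queue_response(), so a CPU that observes the new rcv_state is guaranteed to also observe the earlier write to queue->cmd. A user-space sketch of the same release/acquire pairing in C11 atomics; the structure and names below are illustrative only, not the nvmet code:

#include <stdatomic.h>
#include <stddef.h>

struct queue {
        void *cmd;              /* written before the state is published */
        _Atomic int rcv_state;
};

enum { RECV_ERR = 0, RECV_PDU = 1 };

/* publisher, analogous to nvmet_prepare_receive_pdu() */
static void prepare_receive(struct queue *q)
{
        q->cmd = NULL;
        /* release: the store to cmd above cannot be reordered after this */
        atomic_store_explicit(&q->rcv_state, RECV_PDU, memory_order_release);
}

/* observer, analogous to the check in nvmet_tcp_queue_response() */
static void *current_cmd_if_receiving(struct queue *q)
{
        /* acquire: pairs with the release store in prepare_receive() */
        int state = atomic_load_explicit(&q->rcv_state, memory_order_acquire);

        /* if state reads RECV_PDU, the earlier write to q->cmd is visible too */
        return state == RECV_PDU ? q->cmd : NULL;
}

On strongly ordered architectures these primitives are close to plain accesses, while on weakly ordered ones they emit the barriers that make the pairing hold, which is why the response drops described in the patch title were sporadic and architecture dependent.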

include/linux/blk-mq.h

+1 -1

@@ -28,7 +28,7 @@ typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t);
 typedef __u32 __bitwise req_flags_t;
 
 /* Keep rqf_name[] in sync with the definitions below */
-enum {
+enum rqf_flags {
         /* drive already may have started this one */
         __RQF_STARTED,
         /* request for flush sequence */
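As the pull summary above notes, the anonymous enum caused problems for BPF import of these flags. A trivial, hypothetical C illustration of why a name helps: only a named enum gives consumers (including generated headers such as vmlinux.h) a type they can refer to, while an anonymous enum exposes only its enumerators.

/* Hypothetical illustration: only the named enum can be used as a type. */
enum named_flags { NAMED_A, NAMED_B };  /* usable in declarations, prototypes, etc. */
enum { ANON_A, ANON_B };                /* enumerators exist, but there is no type name */

enum named_flags pick(void)
{
        return NAMED_B;                 /* "enum named_flags" can appear in an API */
}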
