Skip to content

Commit

Permalink
block/: Import from realme-mt6833-T-oss
Browse files Browse the repository at this point in the history
Change-Id: Ie092cd192ec591df7e0179462cc05a554e6c832e
Signed-off-by: techyminati <[email protected]>
  • Loading branch information
techyminati committed Mar 21, 2023
1 parent 09a94bb commit 359289d
Show file tree
Hide file tree
Showing 14 changed files with 330 additions and 18 deletions.
8 changes: 7 additions & 1 deletion block/Kconfig
100644 → 100755
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ config LBDAF
This option is required to support the full capacity of large
(2TB+) block devices, including RAID, disk, Network Block Device,
Logical Volume Manager (LVM) and loopback.

This option also enables support for single files larger than
2TB.

Expand Down Expand Up @@ -246,3 +246,9 @@ config BLK_MQ_RDMA
default y

source block/Kconfig.iosched


#ifdef OPLUS_FEATURE_SCHED_ASSIST
# NOTE(review): Kconfig has no C preprocessor -- any line starting with '#'
# is a plain comment, so the #ifdef/#endif pair above and below is inert and
# this 'source' line is always processed. Gating must happen via the Kconfig
# symbols declared inside block/uxio_first/Kconfig -- TODO confirm intent.
source block/uxio_first/Kconfig
#endif

8 changes: 7 additions & 1 deletion block/Makefile
100644 → 100755
Original file line number Diff line number Diff line change
Expand Up @@ -39,4 +39,10 @@ obj-$(CONFIG_BLK_DEBUG_FS_ZONED)+= blk-mq-debugfs-zoned.o
obj-$(CONFIG_BLK_SED_OPAL) += sed-opal.o
obj-$(CONFIG_BLK_INLINE_ENCRYPTION) += keyslot-manager.o bio-crypt-ctx.o \
blk-crypto.o
obj-$(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) += blk-crypto-fallback.o

obj-$(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) += blk-crypto-fallback.o

#ifdef OPLUS_FEATURE_SCHED_ASSIST
# NOTE(review): make has no C preprocessor -- the #ifdef/#endif lines here are
# ordinary comments, so this rule is gated only by the
# CONFIG_OPLUS_FEATURE_UXIO_FIRST Kconfig symbol, not by
# OPLUS_FEATURE_SCHED_ASSIST -- confirm that is the intended behavior.
obj-$(CONFIG_OPLUS_FEATURE_UXIO_FIRST) += uxio_first/
#endif /*OPLUS_FEATURE_SCHED_ASSIST*/

108 changes: 95 additions & 13 deletions block/blk-core.c
100644 → 100755
Original file line number Diff line number Diff line change
Expand Up @@ -47,10 +47,18 @@
#include "blk-rq-qos.h"
#include "mtk_mmc_block.h"

#if defined(OPLUS_FEATURE_SCHED_ASSIST) && defined(CONFIG_OPLUS_FEATURE_UXIO_FIRST)
#include "uxio_first/uxio_first_opt.h"
#endif

#ifdef CONFIG_DEBUG_FS
struct dentry *blk_debugfs_root;
#endif

#if defined(OPLUS_FEATURE_IOMONITOR) && defined(CONFIG_IOMONITOR)
#include <linux/iomonitor/iomonitor.h>
#endif /*OPLUS_FEATURE_IOMONITOR*/

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
Expand Down Expand Up @@ -191,6 +199,11 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
memset(rq, 0, sizeof(*rq));

INIT_LIST_HEAD(&rq->queuelist);

#if defined(OPLUS_FEATURE_SCHED_ASSIST) && defined(CONFIG_OPLUS_FEATURE_UXIO_FIRST)
INIT_LIST_HEAD(&rq->ux_fg_bg_list);
#endif

INIT_LIST_HEAD(&rq->timeout_list);
rq->cpu = -1;
rq->q = q;
Expand Down Expand Up @@ -1017,6 +1030,13 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
return NULL;

INIT_LIST_HEAD(&q->queue_head);

#if defined(OPLUS_FEATURE_SCHED_ASSIST) && defined(CONFIG_OPLUS_FEATURE_UXIO_FIRST)
INIT_LIST_HEAD(&q->ux_head);
INIT_LIST_HEAD(&q->fg_head);
INIT_LIST_HEAD(&q->bg_head);
#endif

q->last_merge = NULL;
q->end_sector = 0;
q->boundary_rq = NULL;
Expand Down Expand Up @@ -1477,7 +1497,9 @@ static struct request *__get_request(struct request_list *rl, unsigned int op,
*/
if (ioc_batching(q, ioc))
ioc->nr_batch_requests--;

#if defined(OPLUS_FEATURE_IOMONITOR) && defined(CONFIG_IOMONITOR)
iomonitor_init_reqstats(rq);
#endif /*OPLUS_FEATURE_IOMONITOR*/
trace_block_getrq(q, bio, op);
return rq;

Expand Down Expand Up @@ -1771,8 +1793,11 @@ void __blk_put_request(struct request_queue *q, struct request *req)
/* this is a bio leak */
WARN_ON(req->bio != NULL);

#if defined(OPLUS_FEATURE_SCHED_ASSIST) && defined(CONFIG_OPLUS_FEATURE_UXIO_FIRST)
rq_qos_done(q, req, (bool)((req->cmd_flags & REQ_FG)||(req->cmd_flags & REQ_UX)));
#else
rq_qos_done(q, req);

#endif
/*
* Request may not have originated from ll_rw_blk. if not,
* it didn't come out of our reserved rq pools
Expand Down Expand Up @@ -1983,10 +2008,16 @@ unsigned int blk_plug_queued_count(struct request_queue *q)
void blk_init_request_from_bio(struct request *req, struct bio *bio)
{
struct io_context *ioc = rq_ioc(bio);

if (bio->bi_opf & REQ_RAHEAD)
req->cmd_flags |= REQ_FAILFAST_MASK;

#if defined(OPLUS_FEATURE_SCHED_ASSIST) && defined(CONFIG_OPLUS_FEATURE_UXIO_FIRST)
if (bio->bi_opf & REQ_UX)
req->cmd_flags |= REQ_UX;
else if (bio->bi_opf & REQ_FG)
req->cmd_flags |= REQ_FG;
#endif

req->__sector = bio->bi_iter.bi_sector;
if (ioprio_valid(bio_prio(bio)))
req->ioprio = bio_prio(bio);
Expand Down Expand Up @@ -2567,11 +2598,17 @@ blk_qc_t submit_bio(struct bio *bio)

if (op_is_write(bio_op(bio))) {
count_vm_events(PGPGOUT, count);
#if defined(OPLUS_FEATURE_IOMONITOR) && defined(CONFIG_IOMONITOR)
iomonitor_update_vm_stats(PGPGOUT, count);
#endif/*OPLUS_FEATURE_IOMONITOR*/
} else {
if (bio_flagged(bio, BIO_WORKINGSET))
workingset_read = true;
task_io_account_read(bio->bi_iter.bi_size);
count_vm_events(PGPGIN, count);
#if defined(OPLUS_FEATURE_IOMONITOR) && defined(CONFIG_IOMONITOR)
iomonitor_update_vm_stats(PGPGIN, count);
#endif/*OPLUS_FEATURE_IOMONITOR*/
}

if (unlikely(block_dump)) {
Expand All @@ -2583,7 +2620,12 @@ blk_qc_t submit_bio(struct bio *bio)
bio_devname(bio, b), count);
}
}

#if defined(OPLUS_FEATURE_SCHED_ASSIST) && defined(CONFIG_OPLUS_FEATURE_UXIO_FIRST)
if (test_task_ux(current))
bio->bi_opf |= REQ_UX;
else if (high_prio_for_task(current))
bio->bi_opf |= REQ_FG;
#endif
/*
* If we're reading data that is part of the userspace
* workingset, count submission time as memory stall. When the
Expand Down Expand Up @@ -2790,7 +2832,11 @@ void blk_account_io_done(struct request *req, u64 now)
* Don't process normal requests when queue is suspended
* or in the process of suspending/resuming
*/
#if defined(OPLUS_FEATURE_SCHED_ASSIST) && defined(CONFIG_OPLUS_FEATURE_UXIO_FIRST)
bool blk_pm_allow_request(struct request *rq)
#else
static bool blk_pm_allow_request(struct request *rq)
#endif
{
switch (rq->q->rpm_status) {
case RPM_RESUMING:
Expand All @@ -2803,7 +2849,11 @@ static bool blk_pm_allow_request(struct request *rq)
}
}
#else
#if defined(OPLUS_FEATURE_SCHED_ASSIST) && defined(CONFIG_OPLUS_FEATURE_UXIO_FIRST)
/*
 * Dropped 'static' under the OPLUS uxio_first feature -- presumably so code
 * in block/uxio_first/ can call this helper directly; verify a matching
 * extern declaration exists in uxio_first_opt.h.
 */
bool blk_pm_allow_request(struct request *rq)
#else
static bool blk_pm_allow_request(struct request *rq)
#endif
/*
 * Stub variant (this definition sits in the #else branch of the runtime-PM
 * conditional above -- presumably !CONFIG_PM): with runtime PM compiled out
 * there is no rpm_status to consult, so every request is allowed to dispatch.
 */
{
	return true;
}
Expand Down Expand Up @@ -2853,14 +2903,26 @@ static struct request *elv_next_request(struct request_queue *q)
WARN_ON_ONCE(q->mq_ops);

while (1) {
list_for_each_entry(rq, &q->queue_head, queuelist) {
#if defined(OPLUS_FEATURE_SCHED_ASSIST) && defined(CONFIG_OPLUS_FEATURE_UXIO_FIRST)
if (likely(sysctl_uxio_io_opt)){
rq = smart_peek_request(q);
if (rq)
return rq;
} else
{
#endif
list_for_each_entry(rq, &q->queue_head, queuelist) {
if (blk_pm_allow_request(rq))
return rq;

if (rq->rq_flags & RQF_SOFTBARRIER)
break;
}

#if defined(OPLUS_FEATURE_SCHED_ASSIST) && defined(CONFIG_OPLUS_FEATURE_UXIO_FIRST)
}
#endif

/*
* Flush request is running and flush request isn't queueable
* in the drive, we can hold the queue till flush request is
Expand Down Expand Up @@ -2924,6 +2986,9 @@ struct request *blk_peek_request(struct request_queue *q)
* not be passed by new incoming requests
*/
rq->rq_flags |= RQF_STARTED;
#if defined(OPLUS_FEATURE_IOMONITOR) && defined(CONFIG_IOMONITOR)
rq->req_td = ktime_get();
#endif /*OPLUS_FEATURE_IOMONITOR*/
trace_block_rq_issue(q, rq);
}

Expand Down Expand Up @@ -2983,7 +3048,9 @@ struct request *blk_peek_request(struct request_queue *q)
break;
}
}

#if defined(OPLUS_FEATURE_IOMONITOR) && defined(CONFIG_IOMONITOR)
iomonitor_record_io_history(rq);
#endif /*OPLUS_FEATURE_IOMONITOR*/
return rq;
}
EXPORT_SYMBOL(blk_peek_request);
Expand All @@ -2997,13 +3064,21 @@ static void blk_dequeue_request(struct request *rq)

list_del_init(&rq->queuelist);

/*
* the time frame between a request being removed from the lists
* and to it is freed is accounted as io that is in progress at
* the driver side.
*/
if (blk_account_rq(rq))
q->in_flight[rq_is_sync(rq)]++;
#if defined(OPLUS_FEATURE_SCHED_ASSIST) && defined(CONFIG_OPLUS_FEATURE_UXIO_FIRST)
list_del_init(&rq->ux_fg_bg_list);
#endif


#if defined(OPLUS_FEATURE_SCHED_ASSIST) && defined(CONFIG_OPLUS_FEATURE_UXIO_FIRST)
if (blk_account_rq(rq)) {
q->in_flight[rq_is_sync(rq)]++;
ohm_ioqueue_add_inflight(q, rq);
}
#else
if (blk_account_rq(rq))
q->in_flight[rq_is_sync(rq)]++;
#endif

}

/**
Expand Down Expand Up @@ -3114,6 +3189,9 @@ bool blk_update_request(struct request *req, blk_status_t error,
int total_bytes;

trace_block_rq_complete(req, blk_status_to_errno(error), nr_bytes);
#if defined(OPLUS_FEATURE_IOMONITOR) && defined(CONFIG_IOMONITOR)
iomonitor_record_reqstats(req, nr_bytes);
#endif /*OPLUS_FEATURE_IOMONITOR*/

if (!req->bio)
return false;
Expand Down Expand Up @@ -3251,7 +3329,11 @@ void blk_finish_request(struct request *req, blk_status_t error)
blk_account_io_done(req, now);

if (req->end_io) {
#if defined(OPLUS_FEATURE_SCHED_ASSIST) && defined(CONFIG_OPLUS_FEATURE_UXIO_FIRST)
rq_qos_done(q, req, (bool)((req->cmd_flags & REQ_FG)||(req->cmd_flags & REQ_UX)));
#else
rq_qos_done(q, req);
#endif
req->end_io(req, error);
} else {
if (blk_bidi_rq(req))
Expand Down
17 changes: 17 additions & 0 deletions block/blk-flush.c
100644 → 100755
Original file line number Diff line number Diff line change
Expand Up @@ -76,6 +76,11 @@
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"


#if defined(OPLUS_FEATURE_SCHED_ASSIST) && defined(CONFIG_OPLUS_FEATURE_UXIO_FIRST)
#include "uxio_first/uxio_first_opt.h"
#endif

/* PREFLUSH/FUA sequences */
enum {
REQ_FSEQ_PREFLUSH = (1 << 0), /* pre-flushing in progress */
Expand Down Expand Up @@ -142,6 +147,10 @@ static bool blk_flush_queue_rq(struct request *rq, bool add_front)
list_add(&rq->queuelist, &rq->q->queue_head);
else
list_add_tail(&rq->queuelist, &rq->q->queue_head);

#if defined(OPLUS_FEATURE_SCHED_ASSIST) && defined(CONFIG_OPLUS_FEATURE_UXIO_FIRST)
queue_throtl_add_request(rq->q, rq, add_front);
#endif /*OPLUS_FEATURE_SCHED_ASSIST*/
return true;
}
}
Expand Down Expand Up @@ -497,7 +506,15 @@ void blk_insert_flush(struct request *rq)
if (q->mq_ops)
blk_mq_request_bypass_insert(rq, false);
else

#if defined(OPLUS_FEATURE_SCHED_ASSIST) && defined(CONFIG_OPLUS_FEATURE_UXIO_FIRST)
{
list_add_tail(&rq->queuelist, &q->queue_head);
queue_throtl_add_request(q, rq, false);
}
#else
list_add_tail(&rq->queuelist, &q->queue_head);
#endif /*OPLUS_FEATURE_FG_IO_OPT*/
return;
}

Expand Down
9 changes: 8 additions & 1 deletion block/blk-mq.c
100644 → 100755
Original file line number Diff line number Diff line change
Expand Up @@ -506,8 +506,11 @@ void blk_mq_free_request(struct request *rq)

if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
laptop_io_completion(q->backing_dev_info);

#if defined(OPLUS_FEATURE_SCHED_ASSIST) && defined(CONFIG_OPLUS_FEATURE_UXIO_FIRST)
rq_qos_done(q, rq,(bool)((rq->cmd_flags & REQ_FG)||(rq->cmd_flags & REQ_UX)));
#else
rq_qos_done(q, rq);
#endif

if (blk_rq_rl(rq))
blk_put_rl(blk_rq_rl(rq));
Expand All @@ -530,7 +533,11 @@ inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
blk_account_io_done(rq, now);

if (rq->end_io) {
#if defined(OPLUS_FEATURE_SCHED_ASSIST) && defined(CONFIG_OPLUS_FEATURE_UXIO_FIRST)
rq_qos_done(rq->q, rq,(bool)((rq->cmd_flags & REQ_FG)||(rq->cmd_flags & REQ_UX)));
#else
rq_qos_done(rq->q, rq);
#endif
rq->end_io(rq, error);
} else {
if (unlikely(blk_bidi_rq(rq)))
Expand Down
12 changes: 11 additions & 1 deletion block/blk-rq-qos.c
100644 → 100755
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,17 @@ void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
rqos->ops->cleanup(rqos, bio);
}
}
#if defined(OPLUS_FEATURE_SCHED_ASSIST) && defined(CONFIG_OPLUS_FEATURE_UXIO_FIRST)
/*
 * rq_qos_done - notify every registered rq_qos policy that @rq completed
 * @q:    request queue whose rq_qos chain is walked
 * @rq:   the completed request
 * @fgux: OPLUS uxio_first hint forwarded verbatim to each policy; callers
 *        pass true when the request carried REQ_FG or REQ_UX
 *
 * Walks the singly linked list of rq_qos policies hanging off @q and
 * invokes the optional ->done() hook of each one that provides it.
 */
void rq_qos_done(struct request_queue *q, struct request *rq, bool fgux)
{
	struct rq_qos *pos;

	for (pos = q->rq_qos; pos != NULL; pos = pos->next)
		if (pos->ops->done)
			pos->ops->done(pos, rq, fgux);
}
#else
void rq_qos_done(struct request_queue *q, struct request *rq)
{
struct rq_qos *rqos;
Expand All @@ -46,7 +56,7 @@ void rq_qos_done(struct request_queue *q, struct request *rq)
rqos->ops->done(rqos, rq);
}
}

#endif
void rq_qos_issue(struct request_queue *q, struct request *rq)
{
struct rq_qos *rqos;
Expand Down
8 changes: 8 additions & 0 deletions block/blk-rq-qos.h
100644 → 100755
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,11 @@ struct rq_qos_ops {
void (*track)(struct rq_qos *, struct request *, struct bio *);
void (*issue)(struct rq_qos *, struct request *);
void (*requeue)(struct rq_qos *, struct request *);
#if defined(OPLUS_FEATURE_SCHED_ASSIST) && defined(CONFIG_OPLUS_FEATURE_UXIO_FIRST)
void (*done)(struct rq_qos *, struct request *, bool);
#else
void (*done)(struct rq_qos *, struct request *);
#endif
void (*done_bio)(struct rq_qos *, struct bio *);
void (*cleanup)(struct rq_qos *, struct bio *);
void (*exit)(struct rq_qos *);
Expand Down Expand Up @@ -96,7 +100,11 @@ bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
bool rq_depth_calc_max_depth(struct rq_depth *rqd);

void rq_qos_cleanup(struct request_queue *, struct bio *);
#if defined(OPLUS_FEATURE_SCHED_ASSIST) && defined(CONFIG_OPLUS_FEATURE_UXIO_FIRST)
void rq_qos_done(struct request_queue *, struct request *, bool);
#else
void rq_qos_done(struct request_queue *, struct request *);
#endif
void rq_qos_issue(struct request_queue *, struct request *);
void rq_qos_requeue(struct request_queue *, struct request *);
void rq_qos_done_bio(struct request_queue *q, struct bio *bio);
Expand Down
Loading

0 comments on commit 359289d

Please sign in to comment.