Skip to content
This repository has been archived by the owner on Oct 28, 2022. It is now read-only.

Commit

Permalink
scsi: ufs: Add simple IRQ-affined PM QoS operations
Browse files Browse the repository at this point in the history
Qualcomm's PM QoS solution suffers from a number of issues: applying
PM QoS to all CPUs, convoluted spaghetti code that wastes CPU cycles,
and keeping PM QoS applied for 10 ms after all requests finish
processing.

This implements a simple IRQ-affined PM QoS mechanism for each UFS
adapter which uses atomics to elide locking, and enqueues a worker to
apply PM QoS to the target CPU as soon as a command request is issued.

Signed-off-by: Sultan Alsawaf <[email protected]>
Signed-off-by: Yaroslav Furman <[email protected]>
  • Loading branch information
kerneltoast authored and YaroST12 committed Mar 25, 2021
1 parent eb29144 commit f2eb938
Show file tree
Hide file tree
Showing 2 changed files with 84 additions and 5 deletions.
79 changes: 74 additions & 5 deletions drivers/scsi/ufs/ufshcd.c
Original file line number Diff line number Diff line change
Expand Up @@ -3751,6 +3751,48 @@ static inline void ufshcd_put_read_lock(struct ufs_hba *hba)
up_read(&hba->lock);
}

/*
 * Worker that applies the CPU DMA latency request (100 us) on behalf of
 * ufshcd_pm_qos_get(). Runs on system_unbound_wq; may race with the put
 * worker, hence the count re-check under pm_qos.lock.
 */
static void ufshcd_pm_qos_get_worker(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, typeof(*hba), pm_qos.get_work);

	/* Racy fast path: nothing to do if all requests already finished */
	if (!atomic_read(&hba->pm_qos.count))
		return;

	mutex_lock(&hba->pm_qos.lock);
	/* Re-check under the lock so we never leave a stale vote applied */
	if (!hba->pm_qos.active && atomic_read(&hba->pm_qos.count)) {
		hba->pm_qos.active = true;
		pm_qos_update_request(&hba->pm_qos.req, 100);
	}
	mutex_unlock(&hba->pm_qos.lock);
}

/*
 * Worker that drops the CPU DMA latency request back to the default once
 * the last outstanding command completes. Mirror of
 * ufshcd_pm_qos_get_worker(); serialized against it by pm_qos.lock.
 */
static void ufshcd_pm_qos_put_worker(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, typeof(*hba), pm_qos.put_work);

	/* Racy fast path: a new request arrived, keep the vote in place */
	if (atomic_read(&hba->pm_qos.count))
		return;

	mutex_lock(&hba->pm_qos.lock);
	/* Re-check under the lock so a concurrent get isn't undone */
	if (hba->pm_qos.active && !atomic_read(&hba->pm_qos.count)) {
		hba->pm_qos.active = false;
		pm_qos_update_request(&hba->pm_qos.req, PM_QOS_DEFAULT_VALUE);
	}
	mutex_unlock(&hba->pm_qos.lock);
}

/*
 * Take a PM QoS reference for an in-flight command. Lock-free on the hot
 * path; only the 0 -> 1 transition queues the worker that actually
 * applies the latency request.
 */
static void ufshcd_pm_qos_get(struct ufs_hba *hba)
{
	if (atomic_inc_return(&hba->pm_qos.count) > 1)
		return;

	queue_work(system_unbound_wq, &hba->pm_qos.get_work);
}

/*
 * Drop a PM QoS reference when a command finishes. Only the final
 * 1 -> 0 transition queues the worker that releases the latency request.
 */
static void ufshcd_pm_qos_put(struct ufs_hba *hba)
{
	if (atomic_dec_return(&hba->pm_qos.count))
		return;

	queue_work(system_unbound_wq, &hba->pm_qos.put_work);
}

/**
* ufshcd_queuecommand - main entry point for SCSI requests
* @cmd: command from SCSI Midlayer
Expand All @@ -3766,12 +3808,16 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
int tag;
int err = 0;
bool has_read_lock = false;
bool cmd_sent = false;

hba = shost_priv(host);

if (!cmd || !cmd->request || !hba)
return -EINVAL;

/* Wake the CPU managing the IRQ as soon as possible */
ufshcd_pm_qos_get(hba);

tag = cmd->request->tag;
if (!ufshcd_valid_tag(hba, tag)) {
dev_err(hba->dev,
Expand All @@ -3786,13 +3832,16 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
if (!ufshcd_vops_crypto_engine_get_req_status(hba)) {
set_host_byte(cmd, DID_ERROR);
cmd->scsi_done(cmd);
return 0;
err = 0;
} else {
return SCSI_MLQUEUE_HOST_BUSY;
err = SCSI_MLQUEUE_HOST_BUSY;
}
goto out_pm_qos;
}
if (err == -EAGAIN) {
err = SCSI_MLQUEUE_HOST_BUSY;
goto out_pm_qos;
}
if (err == -EAGAIN)
return SCSI_MLQUEUE_HOST_BUSY;
} else if (err == 1) {
has_read_lock = true;
}
Expand Down Expand Up @@ -3957,16 +4006,22 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
if (has_read_lock)
ufshcd_put_read_lock(hba);
cmd->scsi_done(cmd);
return 0;
err = 0;
goto out_pm_qos;
}
goto out;
}

cmd_sent = true;

out_unlock:
spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
if (has_read_lock)
ufshcd_put_read_lock(hba);
out_pm_qos:
if (!cmd_sent)
ufshcd_pm_qos_put(hba);
return err;
}

Expand Down Expand Up @@ -6494,6 +6549,7 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
__ufshcd_release(hba, false);
__ufshcd_hibern8_release(hba, false);
if (cmd->request) {
ufshcd_pm_qos_put(hba);
ufshcd_vops_crypto_engine_cfg_end(hba,
lrbp, cmd->request);
}
Expand Down Expand Up @@ -6576,6 +6632,7 @@ void ufshcd_abort_outstanding_transfer_requests(struct ufs_hba *hba, int result)
* this must be called before calling
* ->scsi_done() callback.
*/
ufshcd_pm_qos_put(hba);
ufshcd_vops_crypto_engine_cfg_end(hba,
lrbp, cmd->request);
}
Expand Down Expand Up @@ -10949,6 +11006,9 @@ void ufshcd_remove(struct ufs_hba *hba)
/* disable interrupts */
ufshcd_disable_intr(hba, hba->intr_mask);
ufshcd_hba_stop(hba, true);
cancel_work_sync(&hba->pm_qos.put_work);
cancel_work_sync(&hba->pm_qos.get_work);
pm_qos_remove_request(&hba->pm_qos.req);

ufshcd_exit_clk_gating(hba);
ufshcd_exit_hibern8_on_idle(hba);
Expand Down Expand Up @@ -11147,6 +11207,14 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
*/
mb();

mutex_init(&hba->pm_qos.lock);
INIT_WORK(&hba->pm_qos.get_work, ufshcd_pm_qos_get_worker);
INIT_WORK(&hba->pm_qos.put_work, ufshcd_pm_qos_put_worker);
hba->pm_qos.req.type = PM_QOS_REQ_AFFINE_IRQ;
hba->pm_qos.req.irq = irq;
pm_qos_add_request(&hba->pm_qos.req, PM_QOS_CPU_DMA_LATENCY,
PM_QOS_DEFAULT_VALUE);

/* IRQ registration */
err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED,
dev_name(dev), hba);
Expand Down Expand Up @@ -11245,6 +11313,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
out_remove_scsi_host:
scsi_remove_host(hba->host);
exit_gating:
pm_qos_remove_request(&hba->pm_qos.req);
ufshcd_exit_clk_gating(hba);
ufshcd_exit_latency_hist(hba);
out_disable:
Expand Down
10 changes: 10 additions & 0 deletions drivers/scsi/ufs/ufshcd.h
Original file line number Diff line number Diff line change
Expand Up @@ -58,6 +58,7 @@
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/extcon.h>
#include <linux/pm_qos.h>
#include "unipro.h"

#include <asm/irq.h>
Expand Down Expand Up @@ -1044,6 +1045,15 @@ struct ufs_hba {
bool force_g4;
/* distinguish between resume and restore */
bool restore;

/* IRQ-affine PM QoS state, one instance per UFS host adapter */
struct {
	/* CPU DMA latency request targeting the CPU handling the UFS IRQ */
	struct pm_qos_request req;
	/* worker that applies req when the first command is issued */
	struct work_struct get_work;
	/* worker that restores req to the default when the last command completes */
	struct work_struct put_work;
	/* serializes get_work/put_work; guards 'active' and updates to req */
	struct mutex lock;
	/* number of in-flight commands holding a QoS reference (lock-free) */
	atomic_t count;
	/* true while req carries the low-latency vote; read/written under lock */
	bool active;
} pm_qos;
};

static inline void ufshcd_mark_shutdown_ongoing(struct ufs_hba *hba)
Expand Down

0 comments on commit f2eb938

Please sign in to comment.