#define BFQ_RQ_SEEKY(bfqd, last_pos, rq) \
        (get_sdist(last_pos, rq) >              \
         BFQQ_SEEK_THR &&                       \
-        (!blk_queue_nonrot(bfqd->queue) ||     \
+        (blk_queue_rot(bfqd->queue) ||         \
          blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT))
#define BFQQ_CLOSE_THR          (sector_t)(8 * 1024)
#define BFQQ_SEEKY(bfqq)        (hweight32(bfqq->seek_history) > 19)
        /* don't use too short time intervals */
        if (delta_usecs < 1000) {
-               if (blk_queue_nonrot(bfqd->queue))
+               if (!blk_queue_rot(bfqd->queue))
                        /*
                         * give same worst-case guarantees as idling
                         * for seeky
                                            struct bfq_queue *bfqq)
{
        bool rot_without_queueing =
-               !blk_queue_nonrot(bfqd->queue) && !bfqd->hw_tag,
+               blk_queue_rot(bfqd->queue) && !bfqd->hw_tag,
                bfqq_sequential_and_IO_bound,
                idling_boosts_thr;
         * flash-based device.
         */
        idling_boosts_thr = rot_without_queueing ||
-               ((!blk_queue_nonrot(bfqd->queue) || !bfqd->hw_tag) &&
+               ((blk_queue_rot(bfqd->queue) || !bfqd->hw_tag) &&
                 bfqq_sequential_and_IO_bound);
        /*
         * there is only one in-flight large request
         * at a time.
         */
-       if (blk_queue_nonrot(bfqd->queue) &&
+       if (!blk_queue_rot(bfqd->queue) &&
            blk_rq_sectors(bfqq->next_rq) >=
            BFQQ_SECT_THR_NONROT &&
            bfqd->tot_rq_in_driver >= 1)
        bfqd->hw_tag_samples = 0;
        bfqd->nonrot_with_queueing =
-               blk_queue_nonrot(bfqd->queue) && bfqd->hw_tag;
+               !blk_queue_rot(bfqd->queue) && bfqd->hw_tag;
}
static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
        INIT_HLIST_HEAD(&bfqd->burst_list);
        bfqd->hw_tag = -1;
-       bfqd->nonrot_with_queueing = blk_queue_nonrot(bfqd->queue);
+       bfqd->nonrot_with_queueing = !blk_queue_rot(bfqd->queue);
        bfqd->bfq_max_budget = bfq_default_max_budget;
         * Begin by assuming, optimistically, that the device peak
         * rate is equal to 2/3 of the highest reference rate.
         */
-       bfqd->rate_dur_prod = ref_rate[blk_queue_nonrot(bfqd->queue)] *
-               ref_wr_duration[blk_queue_nonrot(bfqd->queue)];
-       bfqd->peak_rate = ref_rate[blk_queue_nonrot(bfqd->queue)] * 2 / 3;
+       bfqd->rate_dur_prod = ref_rate[!blk_queue_rot(bfqd->queue)] *
+               ref_wr_duration[!blk_queue_rot(bfqd->queue)];
+       bfqd->peak_rate = ref_rate[!blk_queue_rot(bfqd->queue)] * 2 / 3;

        /* see comments on the definition of next field inside bfq_data */
        bfqd->actuator_load_threshold = 4;
#define blk_queue_nomerges(q)   test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)  \
        test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
-#define blk_queue_nonrot(q)    (!((q)->limits.features & BLK_FEAT_ROTATIONAL))
+#define blk_queue_rot(q)       (!!((q)->limits.features & BLK_FEAT_ROTATIONAL))
#define blk_queue_io_stat(q)    ((q)->limits.features & BLK_FEAT_IO_STAT)
#define blk_queue_passthrough_stat(q)   \
        ((q)->limits.flags & BLK_FLAG_IOSTATS_PASSTHROUGH)
static inline bool bdev_nonrot(struct block_device *bdev)
{
-       return blk_queue_nonrot(bdev_get_queue(bdev));
+       return !blk_queue_rot(bdev_get_queue(bdev));
}

static inline bool bdev_synchronous(struct block_device *bdev)
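
A stand-alone sketch of the polarity flip introduced by this rename: the old
blk_queue_nonrot(q) was true for non-rotational queues, while the new
blk_queue_rot(q) is true for rotational ones, so every converted call site
gains or drops one negation. The types, the feature-bit value, and the
ref_rate numbers below are illustrative stand-ins, not the real blkdev.h or
bfq-iosched.c definitions.

#include <assert.h>
#include <stdio.h>

#define BLK_FEAT_ROTATIONAL     (1u << 1)       /* stand-in feature bit */

struct queue_limits { unsigned int features; };
struct request_queue { struct queue_limits limits; };

/* same shape as the patched macro: true only for rotational devices */
#define blk_queue_rot(q) (!!((q)->limits.features & BLK_FEAT_ROTATIONAL))

int main(void)
{
        struct request_queue hdd = { .limits = { .features = BLK_FEAT_ROTATIONAL } };
        struct request_queue ssd = { .limits = { .features = 0 } };
        /* illustrative reference rates, indexed [rotational, non-rotational] */
        const unsigned long ref_rate[2] = { 14000, 33000 };

        assert(blk_queue_rot(&hdd) && !blk_queue_rot(&ssd));
        /*
         * !blk_queue_rot() yields 0 for rotational and 1 for non-rotational
         * queues, which is why the converted initialization code indexes
         * ref_rate[!blk_queue_rot(q)] exactly where the old code indexed
         * ref_rate[blk_queue_nonrot(q)].
         */
        printf("hdd ref rate: %lu\n", ref_rate[!blk_queue_rot(&hdd)]);
        printf("ssd ref rate: %lu\n", ref_rate[!blk_queue_rot(&ssd)]);
        return 0;
}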