/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up
 * to high-level code.
 */
#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H
#include <linux/percpu_counter.h>
#include <linux/log2.h>
#include <linux/flex_proportions.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/writeback.h>
#include <linux/atomic.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
struct page;
struct device;
struct dentry;
/*
* Bits in backing_dev_info.state
*/
enum bdi_state {
	BDI_async_congested,	/* The async (write) queue is getting full */
	BDI_sync_congested,	/* The sync queue is getting full */
	BDI_registered,		/* bdi_register() was done */
	BDI_writeback_running,	/* Writeback is in progress */
};
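/*
 * Illustrative only (not part of the original header): these bits are
 * tested and changed with atomic bitops on backing_dev_info.state (see
 * the comment on the state field below), e.g.
 *
 *	if (test_bit(BDI_registered, &bdi->state))
 *		...;
 *	set_bit(BDI_writeback_running, &bdi->state);
 */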
typedef int (congested_fn)(void *, int);
enum bdi_stat_item {
	BDI_RECLAIMABLE,
	BDI_WRITEBACK,
	BDI_DIRTIED,
	BDI_WRITTEN,
	NR_BDI_STAT_ITEMS
};
#define BDI_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
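/*
 * Worked example (illustrative, not in the original header): with
 * nr_cpu_ids == 4, ilog2(4) == 2, so BDI_STAT_BATCH == 8 * (1 + 2) == 24.
 * Each per-CPU delta may drift up to the batch size before being folded
 * into the global count, which is where the bdi_stat_error() bound of
 * nr_cpu_ids * BDI_STAT_BATCH below comes from.
 */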
struct bdi_writeback {
	struct backing_dev_info *bdi;	/* our parent bdi */

	unsigned long last_old_flush;	/* last old data flush */

	struct delayed_work dwork;	/* work item used for writeback */
	struct list_head b_dirty;	/* dirty inodes */
	struct list_head b_io;		/* parked for writeback */
	struct list_head b_more_io;	/* parked for more writeback */
	struct list_head b_dirty_time;	/* time stamps are dirty */
	spinlock_t list_lock;		/* protects the b_* lists */
};
struct backing_dev_info {
	struct list_head bdi_list;
	unsigned long ra_pages;	/* max readahead in PAGE_CACHE_SIZE units */
	unsigned long state;	/* Always use atomic bitops on this */
	unsigned int capabilities; /* Device capabilities */
	congested_fn *congested_fn; /* Function pointer if device is md/dm */
	void *congested_data;	/* Pointer to aux data for congested func */

	char *name;

	struct percpu_counter bdi_stat[NR_BDI_STAT_ITEMS];

	unsigned long bw_time_stamp;	/* last time write bandwidth was updated */
	unsigned long dirtied_stamp;	/* pages dirtied at bw_time_stamp */
	unsigned long written_stamp;	/* pages written at bw_time_stamp */
	unsigned long write_bandwidth;	/* the estimated write bandwidth */
	unsigned long avg_write_bandwidth; /* further smoothed write bw */

	/*
	 * The base dirty throttle rate, recalculated every 200ms.
	 * The dirty rate of each task writing to this bdi is kept below it.
	 * @dirty_ratelimit tracks the estimated @balanced_dirty_ratelimit
	 * in small steps and is much smoother and more stable than the latter.
	 */
	unsigned long dirty_ratelimit;
	unsigned long balanced_dirty_ratelimit;

	struct fprop_local_percpu completions;
	int dirty_exceeded;

	unsigned int min_ratio;
	unsigned int max_ratio, max_prop_frac;

	struct bdi_writeback wb;	/* default writeback info for this bdi */
	spinlock_t wb_lock;		/* protects work_list & wb.dwork scheduling */

	struct list_head work_list;

	struct device *dev;

	struct timer_list laptop_mode_wb_timer;

#ifdef CONFIG_DEBUG_FS
	struct dentry *debug_dir;
	struct dentry *debug_stats;
#endif
};
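/*
 * Illustrative only (hypothetical device, not part of the original
 * header): a minimal static definition a simple driver might use before
 * calling bdi_init()/bdi_register(); the field values are assumptions,
 * not requirements.
 */
static struct backing_dev_info example_bdi = {
	.name		= "example",
	.ra_pages	= 32,	/* 128KB of readahead with 4KB pages (assumed) */
	.capabilities	= 0,	/* fully writeback-capable, fully accounted */
};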
struct backing_dev_info *inode_to_bdi(struct inode *inode);
int __must_check bdi_init(struct backing_dev_info *bdi);
void bdi_destroy(struct backing_dev_info *bdi);
__printf(3, 4)
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
			enum wb_reason reason);
void bdi_start_background_writeback(struct backing_dev_info *bdi);
void bdi_writeback_workfn(struct work_struct *work);
int bdi_has_dirty_io(struct backing_dev_info *bdi);
void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi);
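/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * API): the usual lifecycle is bdi_init() -> bdi_register() -> ... ->
 * bdi_destroy(). The "example-%d" name format and the id parameter are
 * assumptions made for this example.
 */
static inline int example_bdi_setup(struct backing_dev_info *bdi, int id)
{
	int err;

	err = bdi_init(bdi);
	if (err)
		return err;

	err = bdi_register(bdi, NULL, "example-%d", id);
	if (err)
		bdi_destroy(bdi);

	return err;
}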
extern spinlock_t bdi_lock;
extern struct list_head bdi_list;
extern struct workqueue_struct *bdi_wq;
static inline int wb_has_dirty_io(struct bdi_writeback *wb)
{
	return !list_empty(&wb->b_dirty) ||
	       !list_empty(&wb->b_io) ||
	       !list_empty(&wb->b_more_io);
}
static inline void __add_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item, s64 amount)
{
	__percpu_counter_add(&bdi->bdi_stat[item], amount, BDI_STAT_BATCH);
}

static inline void __inc_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	__add_bdi_stat(bdi, item, 1);
}

static inline void inc_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_bdi_stat(bdi, item);
	local_irq_restore(flags);
}

static inline void __dec_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	__add_bdi_stat(bdi, item, -1);
}

static inline void dec_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_bdi_stat(bdi, item);
	local_irq_restore(flags);
}

static inline s64 bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	return percpu_counter_read_positive(&bdi->bdi_stat[item]);
}

static inline s64 __bdi_stat_sum(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	return percpu_counter_sum_positive(&bdi->bdi_stat[item]);
}

static inline s64 bdi_stat_sum(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	s64 sum;
	unsigned long flags;

	local_irq_save(flags);
	sum = __bdi_stat_sum(bdi, item);
	local_irq_restore(flags);

	return sum;
}
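/*
 * Illustrative only (hypothetical helper): how a caller might bump these
 * counters as a page enters and leaves writeback, mirroring the pattern
 * the core page accounting uses; the real paths update more state than
 * shown here.
 */
static inline void example_account_writeback(struct backing_dev_info *bdi,
		bool start)
{
	if (start) {
		inc_bdi_stat(bdi, BDI_WRITEBACK);
	} else {
		dec_bdi_stat(bdi, BDI_WRITEBACK);
		inc_bdi_stat(bdi, BDI_WRITTEN);
	}
}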
extern void bdi_writeout_inc(struct backing_dev_info *bdi);
/*
 * maximal error of a stat counter.
 */
static inline unsigned long bdi_stat_error(struct backing_dev_info *bdi)
{
#ifdef CONFIG_SMP
	return nr_cpu_ids * BDI_STAT_BATCH;
#else
	return 1;
#endif
}
int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
/*
 * Flags in backing_dev_info::capabilities
 *
 * The first three flags control whether dirty pages will contribute to the
 * VM's accounting and whether writepages() should be called for dirty pages
 * (something that would not, for example, be appropriate for ramfs).
 *
 * WARNING: these flags are closely related and should not normally be
 * used separately. The BDI_CAP_NO_ACCT_AND_WRITEBACK macro combines these
 * three flags for convenience.
 *
 * BDI_CAP_NO_ACCT_DIRTY: Dirty pages shouldn't contribute to accounting
 * BDI_CAP_NO_WRITEBACK: Don't write pages back
 * BDI_CAP_NO_ACCT_WB: Don't automatically account writeback pages
 * BDI_CAP_STABLE_WRITES: The device requires that pages are not modified
 *			  while under writeback (stable pages)
 * BDI_CAP_STRICTLIMIT: Keep number of dirty pages below bdi threshold.
 */
#define BDI_CAP_NO_ACCT_DIRTY	0x00000001
#define BDI_CAP_NO_WRITEBACK	0x00000002
#define BDI_CAP_NO_ACCT_WB	0x00000004
#define BDI_CAP_STABLE_WRITES	0x00000008
#define BDI_CAP_STRICTLIMIT	0x00000010

#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
	(BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)
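/*
 * Illustrative only (hypothetical helper): a RAM-backed filesystem that
 * neither writes pages back nor wants them in dirty accounting would
 * combine the flags as below (cf. noop_backing_dev_info); the ra_pages
 * choice is an assumption for the example.
 */
static inline void example_init_ram_bdi(struct backing_dev_info *bdi)
{
	bdi->capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK;
	bdi->ra_pages = 0;	/* readahead is pointless without real I/O */
}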
extern struct backing_dev_info noop_backing_dev_info;
int writeback_in_progress(struct backing_dev_info *bdi);
static inline int bdi_congested(struct backing_dev_info *bdi, int bdi_bits)
{
	if (bdi->congested_fn)
		return bdi->congested_fn(bdi->congested_data, bdi_bits);
	return (bdi->state & bdi_bits);
}

static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << BDI_sync_congested);
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << BDI_async_congested);
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, (1 << BDI_sync_congested) |
				  (1 << BDI_async_congested));
}
enum {
	BLK_RW_ASYNC	= 0,
	BLK_RW_SYNC	= 1,
};
void clear_bdi_congested(struct backing_dev_info *bdi, int sync);
void set_bdi_congested(struct backing_dev_info *bdi, int sync);
long congestion_wait(int sync, long timeout);
long wait_iff_congested(struct zone *zone, int sync, long timeout);
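/*
 * Illustrative only (hypothetical helper): a caller that finds the async
 * (write) queue congested can throttle itself with congestion_wait();
 * the HZ/50 timeout is an assumption here, callers choose their own.
 */
static inline void example_throttle_writer(struct backing_dev_info *bdi)
{
	if (bdi_write_congested(bdi))
		congestion_wait(BLK_RW_ASYNC, HZ/50);
}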
int pdflush_proc_obsolete(struct ctl_table *table, int write,
			  void __user *buffer, size_t *lenp, loff_t *ppos);
static inline bool bdi_cap_stable_pages_required(struct backing_dev_info *bdi)
{
	return bdi->capabilities & BDI_CAP_STABLE_WRITES;
}

static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
}

static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
}

static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
{
	/* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
	return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
				      BDI_CAP_NO_WRITEBACK));
}

static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
	return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host));
}

static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
	return bdi_cap_account_dirty(inode_to_bdi(mapping->host));
}
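/*
 * Illustrative only (hypothetical helper, simplified): mirrors the check
 * the core VM makes before accounting a freshly dirtied page; the real
 * accounting path updates more counters than shown here.
 */
static inline void example_account_dirtied(struct address_space *mapping)
{
	if (mapping_cap_account_dirty(mapping))
		inc_bdi_stat(inode_to_bdi(mapping->host), BDI_DIRTIED);
}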
/*
 * Generic "wait" action for sleepers on a bit in bdi->state: just
 * reschedule; returning 0 lets the bit-wait loop re-test the bit.
 */
static inline int bdi_sched_wait(void *word)
{
	schedule();
	return 0;
}
#endif /* _LINUX_BACKING_DEV_H */