/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H


/*
 * Bits in backing_dev_info.state
 */
enum bdi_state {
        BDI_pdflush,            /* A pdflush thread is working this device */
        BDI_write_congested,    /* The write queue is getting full */
        BDI_read_congested,     /* The read queue is getting full */
        BDI_unused,             /* Available bits start here */
};

typedef int (congested_fn)(void *, int);

struct backing_dev_info {
        unsigned long ra_pages;     /* max readahead in PAGE_CACHE_SIZE units */
        unsigned long state;        /* Always use atomic bitops on this */
        unsigned int capabilities;  /* Device capabilities */
        congested_fn *congested_fn; /* Function pointer if device is md/dm */
        void *congested_data;       /* Pointer to aux data for congested func */
        void (*unplug_io_fn)(struct backing_dev_info *, struct page *);
        void *unplug_io_data;
};
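
/*
 * Illustrative only, not part of the original header: bdi->state is shared
 * across contexts, so the congestion bits must be flipped with atomic bitops
 * (set_bit/clear_bit from <linux/bitops.h>), as the field comment above says.
 * A queue-based driver might wrap that roughly like this; the example_*
 * names are hypothetical.
 */
static inline void example_set_write_congested(struct backing_dev_info *bdi)
{
        set_bit(BDI_write_congested, &bdi->state);
}

static inline void example_clear_write_congested(struct backing_dev_info *bdi)
{
        clear_bit(BDI_write_congested, &bdi->state);
}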


/*
 * Flags in backing_dev_info::capabilities
 * - The first two flags control whether dirty pages will contribute to the
 *   VM's accounting and whether writepages() should be called for dirty pages
 *   (something that would not, for example, be appropriate for ramfs)
 * - These flags let !MMU mmap() govern direct device mapping vs immediate
 *   copying more easily for MAP_PRIVATE, especially for ROM filesystems
 */
#define BDI_CAP_NO_ACCT_DIRTY   0x00000001  /* Dirty pages shouldn't contribute to accounting */
#define BDI_CAP_NO_WRITEBACK    0x00000002  /* Don't write pages back */
#define BDI_CAP_MAP_COPY        0x00000004  /* Copy can be mapped (MAP_PRIVATE) */
#define BDI_CAP_MAP_DIRECT      0x00000008  /* Can be mapped directly (MAP_SHARED) */
#define BDI_CAP_READ_MAP        0x00000010  /* Can be mapped for reading */
#define BDI_CAP_WRITE_MAP       0x00000020  /* Can be mapped for writing */
#define BDI_CAP_EXEC_MAP        0x00000040  /* Can be mapped for execution */
#define BDI_CAP_VMFLAGS \
        (BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP)

#if defined(VM_MAYREAD) && \
        (BDI_CAP_READ_MAP != VM_MAYREAD || \
         BDI_CAP_WRITE_MAP != VM_MAYWRITE || \
         BDI_CAP_EXEC_MAP != VM_MAYEXEC)
#error please change backing_dev_info::capabilities flags
#endif
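
/*
 * Illustrative only, not part of the original header: roughly how a
 * memory-backed filesystem such as ramfs would describe itself -- no
 * readahead, no writeback, dirty pages kept out of the VM's accounting,
 * and both direct and copied !MMU mappings allowed.  The variable name is
 * hypothetical; see the filesystem's own code for the real initializer.
 */
static struct backing_dev_info example_ram_backing_dev_info = {
        .ra_pages       = 0,    /* no readahead */
        .capabilities   = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK |
                          BDI_CAP_MAP_DIRECT | BDI_CAP_MAP_COPY |
                          BDI_CAP_VMFLAGS,
};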

extern struct backing_dev_info default_backing_dev_info;
void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page);

int writeback_acquire(struct backing_dev_info *bdi);
int writeback_in_progress(struct backing_dev_info *bdi);
void writeback_release(struct backing_dev_info *bdi);
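
/*
 * Illustrative only, not part of the original header: writeback_acquire()
 * claims the BDI_pdflush bit so that only one flusher works a device at a
 * time, and callers that lose the race simply back off.  A hypothetical
 * flusher would bracket its work like this:
 */
static inline void example_flush_one_bdi(struct backing_dev_info *bdi)
{
        if (!writeback_acquire(bdi))
                return;         /* someone else is already flushing this device */

        /* ... push dirty pages out to the device here ... */

        writeback_release(bdi);
}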

static inline int bdi_congested(struct backing_dev_info *bdi, int bdi_bits)
{
        if (bdi->congested_fn)
                return bdi->congested_fn(bdi->congested_data, bdi_bits);
        return (bdi->state & bdi_bits);
}
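
/*
 * Illustrative only, not part of the original header: a stacked driver
 * (md/dm style) has no single queue of its own, so it supplies a
 * congested_fn that forwards the query to each component device, hooking it
 * up via bdi->congested_fn = example_stack_congested and
 * bdi->congested_data = stack.  The example_stack type is hypothetical.
 */
struct example_stack {
        struct backing_dev_info **component_bdi;
        int nr_components;
};

static int example_stack_congested(void *congested_data, int bdi_bits)
{
        struct example_stack *stack = congested_data;
        int ret = 0;
        int i;

        for (i = 0; i < stack->nr_components; i++)
                ret |= bdi_congested(stack->component_bdi[i], bdi_bits);

        return ret;
}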

static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
        return bdi_congested(bdi, 1 << BDI_read_congested);
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
        return bdi_congested(bdi, 1 << BDI_write_congested);
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
        return bdi_congested(bdi, (1 << BDI_read_congested) |
                                  (1 << BDI_write_congested));
}
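
/*
 * Illustrative only, not part of the original header: optional I/O such as
 * readahead is the classic user of these helpers -- when the device is
 * already backed up it is cheaper to drop speculative work, and background
 * writers check the write-side bit before queueing more.  The example_*
 * helpers are hypothetical.
 */
static inline int example_should_readahead(struct backing_dev_info *bdi)
{
        /* skip speculative reads while the device's read side is congested */
        return !bdi_read_congested(bdi);
}

static inline int example_writer_should_throttle(struct backing_dev_info *bdi)
{
        /* a background writer would pause before adding to the backlog */
        return bdi_write_congested(bdi);
}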

#define bdi_cap_writeback_dirty(bdi) \
        (!((bdi)->capabilities & BDI_CAP_NO_WRITEBACK))

#define bdi_cap_account_dirty(bdi) \
        (!((bdi)->capabilities & BDI_CAP_NO_ACCT_DIRTY))

#define mapping_cap_writeback_dirty(mapping) \
        bdi_cap_writeback_dirty((mapping)->backing_dev_info)

#define mapping_cap_account_dirty(mapping) \
        bdi_cap_account_dirty((mapping)->backing_dev_info)
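
/*
 * Illustrative only, not part of the original header: writeback and
 * dirty-accounting paths consult these macros before touching a mapping, so
 * that ramfs-style mappings (BDI_CAP_NO_WRITEBACK / BDI_CAP_NO_ACCT_DIRTY)
 * are left alone.  struct address_space comes from <linux/fs.h>; the
 * function name is a hypothetical sketch.
 */
static inline int example_mapping_needs_writeback(struct address_space *mapping)
{
        /* nothing to write back for mappings without a real backing store */
        return mapping_cap_writeback_dirty(mapping);
}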


#endif  /* _LINUX_BACKING_DEV_H */