Skip to content

Commit 5b81b1b

Browse files
amotin authored and behlendorf committed
Unify arc_prune_async() code
There is no sense to have separate implementations for FreeBSD and Linux. Make Linux code shared as more functional and just register FreeBSD-specific prune callback with arc_add_prune_callback() API. Aside of code cleanup this should fix excessive pruning on FreeBSD: https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=274698 Signed-off-by: Alexander Motin <[email protected]> Sponsored by: iXsystems, Inc.
1 parent f288fdb commit 5b81b1b

File tree

8 files changed

+87
-117
lines changed

8 files changed

+87
-117
lines changed

include/os/linux/zfs/sys/zpl.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -52,7 +52,7 @@ extern const struct file_operations zpl_file_operations;
5252
extern const struct file_operations zpl_dir_file_operations;
5353

5454
/* zpl_super.c */
55-
extern void zpl_prune_sb(int64_t nr_to_scan, void *arg);
55+
extern void zpl_prune_sb(uint64_t nr_to_scan, void *arg);
5656

5757
extern const struct super_operations zpl_super_operations;
5858
extern const struct export_operations zpl_export_operations;

include/sys/arc.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -81,7 +81,7 @@ typedef struct arc_prune arc_prune_t;
8181
typedef void arc_read_done_func_t(zio_t *zio, const zbookmark_phys_t *zb,
8282
const blkptr_t *bp, arc_buf_t *buf, void *priv);
8383
typedef void arc_write_done_func_t(zio_t *zio, arc_buf_t *buf, void *priv);
84-
typedef void arc_prune_func_t(int64_t bytes, void *priv);
84+
typedef void arc_prune_func_t(uint64_t bytes, void *priv);
8585

8686
/* Shared module parameters */
8787
extern int zfs_arc_average_blocksize;

include/sys/arc_impl.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -994,7 +994,6 @@ extern void arc_wait_for_eviction(uint64_t, boolean_t);
994994

995995
extern void arc_lowmem_init(void);
996996
extern void arc_lowmem_fini(void);
997-
extern void arc_prune_async(int64_t);
998997
extern int arc_memory_throttle(spa_t *spa, uint64_t reserve, uint64_t txg);
999998
extern uint64_t arc_free_memory(void);
1000999
extern int64_t arc_available_memory(void);

module/os/freebsd/zfs/arc_os.c

Lines changed: 0 additions & 62 deletions
Original file line numberDiff line numberDiff line change
@@ -51,11 +51,6 @@
5151
#include <sys/vm.h>
5252
#include <sys/vmmeter.h>
5353

54-
#if __FreeBSD_version >= 1300139
55-
static struct sx arc_vnlru_lock;
56-
static struct vnode *arc_vnlru_marker;
57-
#endif
58-
5954
extern struct vfsops zfs_vfsops;
6055

6156
uint_t zfs_arc_free_target = 0;
@@ -151,53 +146,6 @@ arc_default_max(uint64_t min, uint64_t allmem)
151146
return (MAX(allmem * 5 / 8, size));
152147
}
153148

154-
/*
155-
* Helper function for arc_prune_async() it is responsible for safely
156-
* handling the execution of a registered arc_prune_func_t.
157-
*/
158-
static void
159-
arc_prune_task(void *arg)
160-
{
161-
int64_t nr_scan = (intptr_t)arg;
162-
163-
#ifndef __ILP32__
164-
if (nr_scan > INT_MAX)
165-
nr_scan = INT_MAX;
166-
#endif
167-
168-
#if __FreeBSD_version >= 1300139
169-
sx_xlock(&arc_vnlru_lock);
170-
vnlru_free_vfsops(nr_scan, &zfs_vfsops, arc_vnlru_marker);
171-
sx_xunlock(&arc_vnlru_lock);
172-
#else
173-
vnlru_free(nr_scan, &zfs_vfsops);
174-
#endif
175-
}
176-
177-
/*
178-
* Notify registered consumers they must drop holds on a portion of the ARC
179-
* buffered they reference. This provides a mechanism to ensure the ARC can
180-
* honor the arc_meta_limit and reclaim otherwise pinned ARC buffers. This
181-
* is analogous to dnlc_reduce_cache() but more generic.
182-
*
183-
* This operation is performed asynchronously so it may be safely called
184-
* in the context of the arc_reclaim_thread(). A reference is taken here
185-
* for each registered arc_prune_t and the arc_prune_task() is responsible
186-
* for releasing it once the registered arc_prune_func_t has completed.
187-
*/
188-
void
189-
arc_prune_async(int64_t adjust)
190-
{
191-
192-
#ifndef __LP64__
193-
if (adjust > INTPTR_MAX)
194-
adjust = INTPTR_MAX;
195-
#endif
196-
taskq_dispatch(arc_prune_taskq, arc_prune_task,
197-
(void *)(intptr_t)adjust, TQ_SLEEP);
198-
ARCSTAT_BUMP(arcstat_prune);
199-
}
200-
201149
uint64_t
202150
arc_all_memory(void)
203151
{
@@ -248,23 +196,13 @@ arc_lowmem_init(void)
248196
{
249197
arc_event_lowmem = EVENTHANDLER_REGISTER(vm_lowmem, arc_lowmem, NULL,
250198
EVENTHANDLER_PRI_FIRST);
251-
#if __FreeBSD_version >= 1300139
252-
arc_vnlru_marker = vnlru_alloc_marker();
253-
sx_init(&arc_vnlru_lock, "arc vnlru lock");
254-
#endif
255199
}
256200

257201
void
258202
arc_lowmem_fini(void)
259203
{
260204
if (arc_event_lowmem != NULL)
261205
EVENTHANDLER_DEREGISTER(vm_lowmem, arc_event_lowmem);
262-
#if __FreeBSD_version >= 1300139
263-
if (arc_vnlru_marker != NULL) {
264-
vnlru_free_marker(arc_vnlru_marker);
265-
sx_destroy(&arc_vnlru_lock);
266-
}
267-
#endif
268206
}
269207

270208
void

module/os/freebsd/zfs/zfs_vfsops.c

Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2097,6 +2097,26 @@ zfs_vnodes_adjust_back(void)
20972097
#endif
20982098
}
20992099

2100+
#if __FreeBSD_version >= 1300139
2101+
static struct sx zfs_vnlru_lock;
2102+
static struct vnode *zfs_vnlru_marker;
2103+
#endif
2104+
static arc_prune_t *zfs_prune;
2105+
2106+
static void
2107+
zfs_prune_task(uint64_t nr_to_scan, void *arg __unused)
2108+
{
2109+
if (nr_to_scan > INT_MAX)
2110+
nr_to_scan = INT_MAX;
2111+
#if __FreeBSD_version >= 1300139
2112+
sx_xlock(&zfs_vnlru_lock);
2113+
vnlru_free_vfsops(nr_to_scan, &zfs_vfsops, zfs_vnlru_marker);
2114+
sx_xunlock(&zfs_vnlru_lock);
2115+
#else
2116+
vnlru_free(nr_to_scan, &zfs_vfsops);
2117+
#endif
2118+
}
2119+
21002120
void
21012121
zfs_init(void)
21022122
{
@@ -2123,11 +2143,23 @@ zfs_init(void)
21232143
dmu_objset_register_type(DMU_OST_ZFS, zpl_get_file_info);
21242144

21252145
zfsvfs_taskq = taskq_create("zfsvfs", 1, minclsyspri, 0, 0, 0);
2146+
2147+
#if __FreeBSD_version >= 1300139
2148+
zfs_vnlru_marker = vnlru_alloc_marker();
2149+
sx_init(&zfs_vnlru_lock, "zfs vnlru lock");
2150+
#endif
2151+
zfs_prune = arc_add_prune_callback(zfs_prune_task, NULL);
21262152
}
21272153

21282154
void
21292155
zfs_fini(void)
21302156
{
2157+
arc_remove_prune_callback(zfs_prune);
2158+
#if __FreeBSD_version >= 1300139
2159+
vnlru_free_marker(zfs_vnlru_marker);
2160+
sx_destroy(&zfs_vnlru_lock);
2161+
#endif
2162+
21312163
taskq_destroy(zfsvfs_taskq);
21322164
zfsctl_fini();
21332165
zfs_znode_fini();

module/os/linux/zfs/arc_os.c

Lines changed: 0 additions & 51 deletions
Original file line numberDiff line numberDiff line change
@@ -491,57 +491,6 @@ arc_unregister_hotplug(void)
491491
}
492492
#endif /* _KERNEL */
493493

494-
/*
495-
* Helper function for arc_prune_async() it is responsible for safely
496-
* handling the execution of a registered arc_prune_func_t.
497-
*/
498-
static void
499-
arc_prune_task(void *ptr)
500-
{
501-
arc_prune_t *ap = (arc_prune_t *)ptr;
502-
arc_prune_func_t *func = ap->p_pfunc;
503-
504-
if (func != NULL)
505-
func(ap->p_adjust, ap->p_private);
506-
507-
zfs_refcount_remove(&ap->p_refcnt, func);
508-
}
509-
510-
/*
511-
* Notify registered consumers they must drop holds on a portion of the ARC
512-
* buffered they reference. This provides a mechanism to ensure the ARC can
513-
* honor the arc_meta_limit and reclaim otherwise pinned ARC buffers. This
514-
* is analogous to dnlc_reduce_cache() but more generic.
515-
*
516-
* This operation is performed asynchronously so it may be safely called
517-
* in the context of the arc_reclaim_thread(). A reference is taken here
518-
* for each registered arc_prune_t and the arc_prune_task() is responsible
519-
* for releasing it once the registered arc_prune_func_t has completed.
520-
*/
521-
void
522-
arc_prune_async(int64_t adjust)
523-
{
524-
arc_prune_t *ap;
525-
526-
mutex_enter(&arc_prune_mtx);
527-
for (ap = list_head(&arc_prune_list); ap != NULL;
528-
ap = list_next(&arc_prune_list, ap)) {
529-
530-
if (zfs_refcount_count(&ap->p_refcnt) >= 2)
531-
continue;
532-
533-
zfs_refcount_add(&ap->p_refcnt, ap->p_pfunc);
534-
ap->p_adjust = adjust;
535-
if (taskq_dispatch(arc_prune_taskq, arc_prune_task,
536-
ap, TQ_SLEEP) == TASKQID_INVALID) {
537-
zfs_refcount_remove(&ap->p_refcnt, ap->p_pfunc);
538-
continue;
539-
}
540-
ARCSTAT_BUMP(arcstat_prune);
541-
}
542-
mutex_exit(&arc_prune_mtx);
543-
}
544-
545494
/* BEGIN CSTYLED */
546495
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, shrinker_limit, INT, ZMOD_RW,
547496
"Limit on number of pages that ARC shrinker can reclaim at once");

module/os/linux/zfs/zpl_super.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -334,7 +334,7 @@ zpl_kill_sb(struct super_block *sb)
334334
}
335335

336336
void
337-
zpl_prune_sb(int64_t nr_to_scan, void *arg)
337+
zpl_prune_sb(uint64_t nr_to_scan, void *arg)
338338
{
339339
struct super_block *sb = (struct super_block *)arg;
340340
int objects = 0;

module/zfs/arc.c

Lines changed: 52 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -868,6 +868,8 @@ static void l2arc_do_free_on_write(void);
868868
static void l2arc_hdr_arcstats_update(arc_buf_hdr_t *hdr, boolean_t incr,
869869
boolean_t state_only);
870870

871+
static void arc_prune_async(uint64_t adjust);
872+
871873
#define l2arc_hdr_arcstats_increment(hdr) \
872874
l2arc_hdr_arcstats_update((hdr), B_TRUE, B_FALSE)
873875
#define l2arc_hdr_arcstats_decrement(hdr) \
@@ -6521,6 +6523,56 @@ arc_remove_prune_callback(arc_prune_t *p)
65216523
kmem_free(p, sizeof (*p));
65226524
}
65236525

6526+
/*
6527+
* Helper function for arc_prune_async() it is responsible for safely
6528+
* handling the execution of a registered arc_prune_func_t.
6529+
*/
6530+
static void
6531+
arc_prune_task(void *ptr)
6532+
{
6533+
arc_prune_t *ap = (arc_prune_t *)ptr;
6534+
arc_prune_func_t *func = ap->p_pfunc;
6535+
6536+
if (func != NULL)
6537+
func(ap->p_adjust, ap->p_private);
6538+
6539+
zfs_refcount_remove(&ap->p_refcnt, func);
6540+
}
6541+
6542+
/*
6543+
* Notify registered consumers they must drop holds on a portion of the ARC
6544+
* buffers they reference. This provides a mechanism to ensure the ARC can
6545+
* honor the metadata limit and reclaim otherwise pinned ARC buffers.
6546+
*
6547+
* This operation is performed asynchronously so it may be safely called
6548+
* in the context of the arc_reclaim_thread(). A reference is taken here
6549+
* for each registered arc_prune_t and the arc_prune_task() is responsible
6550+
* for releasing it once the registered arc_prune_func_t has completed.
6551+
*/
6552+
static void
6553+
arc_prune_async(uint64_t adjust)
6554+
{
6555+
arc_prune_t *ap;
6556+
6557+
mutex_enter(&arc_prune_mtx);
6558+
for (ap = list_head(&arc_prune_list); ap != NULL;
6559+
ap = list_next(&arc_prune_list, ap)) {
6560+
6561+
if (zfs_refcount_count(&ap->p_refcnt) >= 2)
6562+
continue;
6563+
6564+
zfs_refcount_add(&ap->p_refcnt, ap->p_pfunc);
6565+
ap->p_adjust = adjust;
6566+
if (taskq_dispatch(arc_prune_taskq, arc_prune_task,
6567+
ap, TQ_SLEEP) == TASKQID_INVALID) {
6568+
zfs_refcount_remove(&ap->p_refcnt, ap->p_pfunc);
6569+
continue;
6570+
}
6571+
ARCSTAT_BUMP(arcstat_prune);
6572+
}
6573+
mutex_exit(&arc_prune_mtx);
6574+
}
6575+
65246576
/*
65256577
* Notify the arc that a block was freed, and thus will never be used again.
65266578
*/

0 commit comments

Comments (0)