0001
0002
0003
0004
0005
0006
0007
0008
0009
0010 #ifdef CONFIG_DAMON_KUNIT_TEST
0011
0012 #ifndef _DAMON_CORE_TEST_H
0013 #define _DAMON_CORE_TEST_H
0014
0015 #include <kunit/test.h>
0016
0017 static void damon_test_regions(struct kunit *test)
0018 {
0019 struct damon_region *r;
0020 struct damon_target *t;
0021
0022 r = damon_new_region(1, 2);
0023 KUNIT_EXPECT_EQ(test, 1ul, r->ar.start);
0024 KUNIT_EXPECT_EQ(test, 2ul, r->ar.end);
0025 KUNIT_EXPECT_EQ(test, 0u, r->nr_accesses);
0026
0027 t = damon_new_target();
0028 KUNIT_EXPECT_EQ(test, 0u, damon_nr_regions(t));
0029
0030 damon_add_region(r, t);
0031 KUNIT_EXPECT_EQ(test, 1u, damon_nr_regions(t));
0032
0033 damon_del_region(r, t);
0034 KUNIT_EXPECT_EQ(test, 0u, damon_nr_regions(t));
0035
0036 damon_free_target(t);
0037 }
0038
0039 static unsigned int nr_damon_targets(struct damon_ctx *ctx)
0040 {
0041 struct damon_target *t;
0042 unsigned int nr_targets = 0;
0043
0044 damon_for_each_target(t, ctx)
0045 nr_targets++;
0046
0047 return nr_targets;
0048 }
0049
/*
 * Check target registration: a new context starts with no targets, adding
 * a target makes it visible, and destroying the target removes it again.
 */
static void damon_test_target(struct kunit *test)
{
	struct damon_ctx *ctx = damon_new_ctx();
	struct damon_target *target = damon_new_target();

	KUNIT_EXPECT_EQ(test, 0u, nr_damon_targets(ctx));

	damon_add_target(ctx, target);
	KUNIT_EXPECT_EQ(test, 1u, nr_damon_targets(ctx));

	damon_destroy_target(target);
	KUNIT_EXPECT_EQ(test, 0u, nr_damon_targets(ctx));

	damon_destroy_ctx(ctx);
}
0066
0067
0068
0069
0070
0071
0072
0073
0074
0075
0076
/*
 * Test kdamond_reset_aggregated().
 *
 * Build three targets, each with three regions of arbitrary address
 * ranges and nonzero access counts, then verify the reset zeroes the
 * 'nr_accesses' of every region while leaving the regions and targets
 * themselves in place.
 */
static void damon_test_aggregate(struct kunit *test)
{
	struct damon_ctx *ctx = damon_new_ctx();
	unsigned long saddr[][3] = {{10, 20, 30}, {5, 42, 49}, {13, 33, 55} };
	unsigned long eaddr[][3] = {{15, 27, 40}, {31, 45, 55}, {23, 44, 66} };
	unsigned long accesses[][3] = {{42, 95, 84}, {10, 20, 30}, {0, 1, 2} };
	struct damon_target *t;
	struct damon_region *r;
	int it, ir;

	/* Register three empty targets to the context */
	for (it = 0; it < 3; it++) {
		t = damon_new_target();
		damon_add_target(ctx, t);
	}

	/* Populate each target with the three regions defined above */
	it = 0;
	damon_for_each_target(t, ctx) {
		for (ir = 0; ir < 3; ir++) {
			r = damon_new_region(saddr[it][ir], eaddr[it][ir]);
			r->nr_accesses = accesses[it][ir];
			damon_add_region(r, t);
		}
		it++;
	}
	kdamond_reset_aggregated(ctx);
	it = 0;
	damon_for_each_target(t, ctx) {
		ir = 0;
		/* 'nr_accesses' of every region should be zeroed */
		damon_for_each_region(r, t) {
			KUNIT_EXPECT_EQ(test, 0u, r->nr_accesses);
			ir++;
		}
		/* The regions themselves should be preserved */
		KUNIT_EXPECT_EQ(test, 3, ir);
		it++;
	}
	/* The targets should be preserved, too */
	KUNIT_EXPECT_EQ(test, 3, it);

	damon_destroy_ctx(ctx);
}
0119
0120 static void damon_test_split_at(struct kunit *test)
0121 {
0122 struct damon_ctx *c = damon_new_ctx();
0123 struct damon_target *t;
0124 struct damon_region *r;
0125
0126 t = damon_new_target();
0127 r = damon_new_region(0, 100);
0128 damon_add_region(r, t);
0129 damon_split_region_at(c, t, r, 25);
0130 KUNIT_EXPECT_EQ(test, r->ar.start, 0ul);
0131 KUNIT_EXPECT_EQ(test, r->ar.end, 25ul);
0132
0133 r = damon_next_region(r);
0134 KUNIT_EXPECT_EQ(test, r->ar.start, 25ul);
0135 KUNIT_EXPECT_EQ(test, r->ar.end, 100ul);
0136
0137 damon_free_target(t);
0138 damon_destroy_ctx(c);
0139 }
0140
0141 static void damon_test_merge_two(struct kunit *test)
0142 {
0143 struct damon_target *t;
0144 struct damon_region *r, *r2, *r3;
0145 int i;
0146
0147 t = damon_new_target();
0148 r = damon_new_region(0, 100);
0149 r->nr_accesses = 10;
0150 damon_add_region(r, t);
0151 r2 = damon_new_region(100, 300);
0152 r2->nr_accesses = 20;
0153 damon_add_region(r2, t);
0154
0155 damon_merge_two_regions(t, r, r2);
0156 KUNIT_EXPECT_EQ(test, r->ar.start, 0ul);
0157 KUNIT_EXPECT_EQ(test, r->ar.end, 300ul);
0158 KUNIT_EXPECT_EQ(test, r->nr_accesses, 16u);
0159
0160 i = 0;
0161 damon_for_each_region(r3, t) {
0162 KUNIT_EXPECT_PTR_EQ(test, r, r3);
0163 i++;
0164 }
0165 KUNIT_EXPECT_EQ(test, i, 1);
0166
0167 damon_free_target(t);
0168 }
0169
0170 static struct damon_region *__nth_region_of(struct damon_target *t, int idx)
0171 {
0172 struct damon_region *r;
0173 unsigned int i = 0;
0174
0175 damon_for_each_region(r, t) {
0176 if (i++ == idx)
0177 return r;
0178 }
0179
0180 return NULL;
0181 }
0182
/*
 * Test damon_merge_regions_of().
 *
 * Build eight regions ('sa'/'ea' bounds, 'nrs' access counts) and merge
 * adjacent ones whose access-count difference is at most 9 (with a merged
 * size limit of 9999).  Expected result is the five regions described by
 * 'saddrs'/'eaddrs': e.g., the first two regions (both nr_accesses == 0)
 * merge into one, while the 20- and 30-access regions stay separate since
 * their difference exceeds the threshold.
 */
static void damon_test_merge_regions_of(struct kunit *test)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned long sa[] = {0, 100, 114, 122, 130, 156, 170, 184};
	unsigned long ea[] = {100, 112, 122, 130, 156, 170, 184, 230};
	unsigned int nrs[] = {0, 0, 10, 10, 20, 30, 1, 2};

	/* Expected region bounds after the merge */
	unsigned long saddrs[] = {0, 114, 130, 156, 170};
	unsigned long eaddrs[] = {112, 130, 156, 170, 230};
	int i;

	t = damon_new_target();
	for (i = 0; i < ARRAY_SIZE(sa); i++) {
		r = damon_new_region(sa[i], ea[i]);
		r->nr_accesses = nrs[i];
		damon_add_region(r, t);
	}

	damon_merge_regions_of(t, 9, 9999);

	/* The five merged regions should carry the expected bounds */
	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 5u);
	for (i = 0; i < 5; i++) {
		r = __nth_region_of(t, i);
		KUNIT_EXPECT_EQ(test, r->ar.start, saddrs[i]);
		KUNIT_EXPECT_EQ(test, r->ar.end, eaddrs[i]);
	}
	damon_free_target(t);
}
0212
/*
 * Check damon_split_regions_of(): splitting a single region into at most
 * N sub-regions must never produce more than N regions on the target.
 */
static void damon_test_split_regions_of(struct kunit *test)
{
	struct damon_ctx *ctx = damon_new_ctx();
	struct damon_target *target;
	struct damon_region *region;

	/* Split [0, 22) into at most two pieces */
	target = damon_new_target();
	region = damon_new_region(0, 22);
	damon_add_region(region, target);
	damon_split_regions_of(ctx, target, 2);
	KUNIT_EXPECT_LE(test, damon_nr_regions(target), 2u);
	damon_free_target(target);

	/* Split [0, 220) into at most four pieces */
	target = damon_new_target();
	region = damon_new_region(0, 220);
	damon_add_region(region, target);
	damon_split_regions_of(ctx, target, 4);
	KUNIT_EXPECT_LE(test, damon_nr_regions(target), 4u);
	damon_free_target(target);

	damon_destroy_ctx(ctx);
}
0234
0235 static void damon_test_ops_registration(struct kunit *test)
0236 {
0237 struct damon_ctx *c = damon_new_ctx();
0238 struct damon_operations ops, bak;
0239
0240
0241 KUNIT_EXPECT_EQ(test, damon_select_ops(c, DAMON_OPS_VADDR), 0);
0242 KUNIT_EXPECT_EQ(test, damon_select_ops(c, DAMON_OPS_PADDR), 0);
0243
0244
0245 ops.id = DAMON_OPS_VADDR;
0246 KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), -EINVAL);
0247 ops.id = DAMON_OPS_PADDR;
0248 KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), -EINVAL);
0249
0250
0251 KUNIT_EXPECT_EQ(test, damon_select_ops(c, NR_DAMON_OPS), -EINVAL);
0252
0253
0254 mutex_lock(&damon_ops_lock);
0255 bak = damon_registered_ops[DAMON_OPS_VADDR];
0256 damon_registered_ops[DAMON_OPS_VADDR] = (struct damon_operations){};
0257 mutex_unlock(&damon_ops_lock);
0258
0259 ops.id = DAMON_OPS_VADDR;
0260 KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), 0);
0261
0262 mutex_lock(&damon_ops_lock);
0263 damon_registered_ops[DAMON_OPS_VADDR] = bak;
0264 mutex_unlock(&damon_ops_lock);
0265
0266
0267 KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), -EINVAL);
0268 }
0269
/* Test cases run by the "damon" KUnit suite defined below */
static struct kunit_case damon_test_cases[] = {
	KUNIT_CASE(damon_test_target),
	KUNIT_CASE(damon_test_regions),
	KUNIT_CASE(damon_test_aggregate),
	KUNIT_CASE(damon_test_split_at),
	KUNIT_CASE(damon_test_merge_two),
	KUNIT_CASE(damon_test_merge_regions_of),
	KUNIT_CASE(damon_test_split_regions_of),
	KUNIT_CASE(damon_test_ops_registration),
	{},	/* sentinel: terminates the case list */
};
0281
/* Suite definition; registered with the KUnit framework just below */
static struct kunit_suite damon_test_suite = {
	.name = "damon",
	.test_cases = damon_test_cases,
};
kunit_test_suite(damon_test_suite);
0287
0288 #endif
0289
0290 #endif