/*
 * libwebsockets - small server side websockets and web server implementation
 *
 * Copyright (C) 2010 - 2021 Andy Green <andy@warmcat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <private-lib-core.h>
#include "private-lib-misc-cache-ttl.h"

#if defined(write)
#undef write
#endif

static void
update_sul(lws_cache_ttl_lru_t_heap_t *cache);

static int
lws_cache_heap_invalidate(struct lws_cache_ttl_lru *_c, const char *key);

static int
sort_expiry(const lws_dll2_t *a, const lws_dll2_t *b)
{
	const lws_cache_ttl_item_heap_t
		*c = lws_container_of(a, lws_cache_ttl_item_heap_t, list_expiry),
		*d = lws_container_of(b, lws_cache_ttl_item_heap_t, list_expiry);

	if (c->expiry > d->expiry)
		return 1;
	if (c->expiry < d->expiry)
		return -1;

	return 0;
}

static void
_lws_cache_heap_item_destroy(lws_cache_ttl_lru_t_heap_t *cache,
			     lws_cache_ttl_item_heap_t *item)
{
	lwsl_cache("%s: %s (%s)\n", __func__, cache->cache.info.name,
			(const char *)&item[1] + item->size);

	lws_dll2_remove(&item->list_expiry);
	lws_dll2_remove(&item->list_lru);

	cache->cache.current_footprint -= item->size;

	update_sul(cache);

	if (cache->cache.info.cb)
		cache->cache.info.cb((void *)((uint8_t *)&item[1]), item->size);

	lws_free(item);
}

static void
lws_cache_heap_item_destroy(lws_cache_ttl_lru_t_heap_t *cache,
			    lws_cache_ttl_item_heap_t *item, int parent_too)
{
	struct lws_cache_ttl_lru *backing = &cache->cache;
	const char *tag = ((const char *)&item[1]) + item->size;

	/*
	 * We're destroying a normal item?
	 */

	if (*tag == META_ITEM_LEADING)
		/* no, nothing to check here then */
		goto post;

	if (backing->info.parent)
		backing = backing->info.parent;

	/*
	 * We need to check any cached meta-results from lookups that
	 * include this normal item, and if any, invalidate the meta-results
	 * since they have to be recalculated before being used again.
	 */
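
	/*
	 * For reference, as consumed by the parser below, a meta-result
	 * payload is a packed sequence of records, one per matching item tag:
	 *
	 *   [ 4 bytes (not interpreted here) ][ 4-byte tag length, BE ]
	 *   [ tag string ][ NUL ]
	 *
	 * so each record occupies 8 + taglen + 1 bytes.
	 */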

	lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1,
				   cache->items_lru.head) {
		lws_cache_ttl_item_heap_t *i = lws_container_of(d,
						lws_cache_ttl_item_heap_t,
						list_lru);
		const char *iname = ((const char *)&i[1]) + i->size;
		uint8_t *pay = (uint8_t *)&i[1], *end = pay + i->size;

		if (*iname == META_ITEM_LEADING) {
			size_t taglen = strlen(tag);

			/*
			 * If the item about to be destroyed makes an
			 * appearance on the meta results list, we must kill
			 * the meta result item to force recalc next time
			 */

			while (pay < end) {
				uint32_t tlen = lws_ser_ru32be(pay + 4);

				if (tlen == taglen &&
				    !strcmp((const char *)pay + 8, tag)) {
#if defined(_DEBUG)
					/*
					 * Sanity check that the item tag is
					 * really a match for that meta results
					 * item
					 */

					assert(!backing->info.ops->tag_match(
						 backing, iname + 1, tag, 1));
#endif
					_lws_cache_heap_item_destroy(cache, i);
					goto next_item;
				}
				pay += 8 + tlen + 1;
			}

#if defined(_DEBUG)
			/*
			 * We scanned the whole meta item without finding it,
			 * sanity check that the item tag really isn't a match
			 * for that meta results item
			 */

			assert(backing->info.ops->tag_match(backing, iname + 1,
							    tag, 1));
#endif
		}

next_item:
		;
	} lws_end_foreach_dll_safe(d, d1);

post:
	_lws_cache_heap_item_destroy(cache, item);
}

static void
lws_cache_item_evict_lru(lws_cache_ttl_lru_t_heap_t *cache)
{
	lws_cache_ttl_item_heap_t *ei;

	if (!cache->items_lru.head)
		return;

	ei = lws_container_of(cache->items_lru.head,
			      lws_cache_ttl_item_heap_t, list_lru);

	lws_cache_heap_item_destroy(cache, ei, 0);
}

/*
 * We need to weed out expired entries at this cache level
 */

static void
expiry_cb(lws_sorted_usec_list_t *sul)
{
	lws_cache_ttl_lru_t_heap_t *cache = lws_container_of(sul,
					lws_cache_ttl_lru_t_heap_t, cache.sul);
	lws_usec_t now = lws_now_usecs();

	lwsl_cache("%s: %s\n", __func__, cache->cache.info.name);

	while (cache->items_expiry.head) {
		lws_cache_ttl_item_heap_t *item;

		item = lws_container_of(cache->items_expiry.head,
					lws_cache_ttl_item_heap_t, list_expiry);

		if (item->expiry > now)
			return;

		lws_cache_heap_item_destroy(cache, item, 1);
	}
}

/*
 * Let's figure out what the earliest next expiry is
 */

static int
earliest_expiry(lws_cache_ttl_lru_t_heap_t *cache, lws_usec_t *pearliest)
{
	lws_cache_ttl_item_heap_t *item;

	if (!cache->items_expiry.head)
		return 1;

	item = lws_container_of(cache->items_expiry.head,
				lws_cache_ttl_item_heap_t, list_expiry);

	*pearliest = item->expiry;

	return 0;
}

static void
update_sul(lws_cache_ttl_lru_t_heap_t *cache)
{
	lws_usec_t earliest;

	/* weed out any newly-expired */
	expiry_cb(&cache->cache.sul);

	/* figure out the next soonest expiring item */
	if (earliest_expiry(cache, &earliest)) {
		lws_sul_cancel(&cache->cache.sul);
		return;
	}

	lwsl_debug("%s: setting exp %llu\n", __func__,
			(unsigned long long)earliest);

	if (earliest)
		lws_cache_schedule(&cache->cache, expiry_cb, earliest);
}

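/*
 * Return the heap item whose tag exactly matches specific_key, walking the
 * lru list, or NULL if it is not present at this cache level.
 */
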
static lws_cache_ttl_item_heap_t *
lws_cache_heap_specific(lws_cache_ttl_lru_t_heap_t *cache,
			const char *specific_key)
{
	lws_start_foreach_dll(struct lws_dll2 *, d, cache->items_lru.head) {
		lws_cache_ttl_item_heap_t *item = lws_container_of(d,
						lws_cache_ttl_item_heap_t,
						list_lru);
		const char *iname = ((const char *)&item[1]) + item->size;

		if (!strcmp(specific_key, iname))
			return item;

	} lws_end_foreach_dll(d);

	return NULL;
}

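/*
 * Check tag against the wildcard key wc, returning 0 if it matches.  At the
 * heap level the lookup_rules flags are not needed; plain wildcard string
 * matching is enough.
 */
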
static int
lws_cache_heap_tag_match(struct lws_cache_ttl_lru *cache, const char *wc,
				const char *tag, char lookup_rules)
{
	return lws_strcmp_wildcard(wc, strlen(wc), tag, strlen(tag));
}

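/*
 * Add an lws_cache_match_t to results_owner for every item at this level
 * whose tag matches wildcard_key, skipping tags already contributed by an
 * earlier cache level.  Returns nonzero only on OOM.
 */
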
static int
lws_cache_heap_lookup(struct lws_cache_ttl_lru *_c, const char *wildcard_key,
		      lws_dll2_owner_t *results_owner)
{
	lws_cache_ttl_lru_t_heap_t *cache = (lws_cache_ttl_lru_t_heap_t *)_c;
	size_t sklen = strlen(wildcard_key);

	lws_start_foreach_dll(struct lws_dll2 *, d, cache->items_lru.head) {
		lws_cache_ttl_item_heap_t *item = lws_container_of(d,
						lws_cache_ttl_item_heap_t,
						list_lru);
		const char *iname = ((const char *)&item[1]) + item->size;

		if (!lws_strcmp_wildcard(wildcard_key, sklen, iname,
					 strlen(iname))) {
			size_t ilen = strlen(iname);
			lws_cache_match_t *m;
			char hit = 0;

			/*
			 * It mustn't already be on the results list from an
			 * earlier cache level
			 */

			lws_start_foreach_dll(struct lws_dll2 *, e,
					results_owner->head) {
				lws_cache_match_t *i = lws_container_of(e,
							lws_cache_match_t, list);
				if (i->tag_size == ilen &&
				    !strcmp(iname, ((const char *)&i[1]))) {
					hit = 1;
					break;
				}
			} lws_end_foreach_dll(e);

			if (!hit) {

				/*
				 * it's unique, instantiate a record for it
				 */

				m = lws_fi(&_c->info.cx->fic,
					   "cache_lookup_oom") ? NULL :
					lws_malloc(sizeof(*m) + ilen + 1,
						   __func__);
				if (!m) {
					lws_cache_clear_matches(results_owner);
					return 1;
				}

				memset(&m->list, 0, sizeof(m->list));
				m->tag_size = ilen;
				memcpy(&m[1], iname, ilen + 1);

				lws_dll2_add_tail(&m->list, results_owner);
			}
		}

	} lws_end_foreach_dll(d);

	return 0;
}

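/*
 * Allocate and fill a new item for specific_key at this cache level, evicting
 * lru items as needed to stay inside the configured limits, and invalidating
 * any cached meta-results the new key would now match.  *ppvoid, if given,
 * is set to the writeable payload area.
 */
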
static int
lws_cache_heap_write(struct lws_cache_ttl_lru *_c, const char *specific_key,
		     const uint8_t *source, size_t size, lws_usec_t expiry,
		     void **ppvoid)
{
	lws_cache_ttl_lru_t_heap_t *cache = (lws_cache_ttl_lru_t_heap_t *)_c;
	struct lws_cache_ttl_lru *backing = _c;
	lws_cache_ttl_item_heap_t *item, *ei;
	size_t kl = strlen(specific_key);
	char *p;

	lwsl_cache("%s: %s: len %d\n", __func__, _c->info.name, (int)size);

	/*
	 * Is this new tag going to invalidate any existing cached meta-results?
	 *
	 * If so, let's destroy any of those first to recover the heap
	 */

	if (backing->info.parent)
		backing = backing->info.parent;

	lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1,
				   cache->items_lru.head) {
		lws_cache_ttl_item_heap_t *i = lws_container_of(d,
						lws_cache_ttl_item_heap_t,
						list_lru);
		const char *iname = ((const char *)&i[1]) + i->size;

		if (*iname == META_ITEM_LEADING) {

			/*
			 * If the item about to be added would match any cached
			 * results from before it was added, we have to
			 * invalidate them.  To check this, we have to use the
			 * matching rules at the backing store level
			 */

			if (!backing->info.ops->tag_match(backing, iname + 1,
							  specific_key, 1))
				_lws_cache_heap_item_destroy(cache, i);
		}

	} lws_end_foreach_dll_safe(d, d1);


	/*
	 * Keep us under the limit if possible... note this will always allow
	 * caching a single large item even if it is above the limits
	 */

	while ((cache->cache.info.max_footprint &&
	        cache->cache.current_footprint + size >
					     cache->cache.info.max_footprint) ||
	       (cache->cache.info.max_items &&
		cache->items_lru.count + 1 > cache->cache.info.max_items))
		lws_cache_item_evict_lru(cache);

	/* remove any existing entry of the same key */

	lws_cache_heap_invalidate(&cache->cache, specific_key);

	item = lws_fi(&_c->info.cx->fic, "cache_write_oom") ? NULL :
			lws_malloc(sizeof(*item) + kl + 1u + size, __func__);
	if (!item)
		return 1;

	cache->cache.current_footprint += size;

	/* only need to zero down our item object */
	memset(item, 0, sizeof(*item));

	p = (char *)&item[1];
	if (ppvoid)
		*ppvoid = p;

	/* copy the payload into place */
	if (source)
		memcpy(p, source, size);

	/* copy the key string into place, with terminating NUL */
	memcpy(p + size, specific_key, kl + 1);

	item->expiry = expiry;
	item->key_len = kl;
	item->size = size;

	if (expiry) {
		/* adding to the expiry list is optional, done on nonzero expiry */
		lws_dll2_add_sorted(&item->list_expiry, &cache->items_expiry,
				    sort_expiry);
		ei = lws_container_of(cache->items_expiry.head,
				      lws_cache_ttl_item_heap_t, list_expiry);
		lwsl_debug("%s: setting exp %llu\n", __func__,
						(unsigned long long)ei->expiry);
		lws_cache_schedule(&cache->cache, expiry_cb, ei->expiry);
	}

	/* always add ourselves to head of lru list */
	lws_dll2_add_head(&item->list_lru, &cache->items_lru);

	return 0;
}

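/*
 * Look up specific_key at this cache level; if present, promote it to the
 * head of the lru list and optionally return pointers to its payload and
 * size.  Returns nonzero if not found.
 */
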
static int
lws_cache_heap_get(struct lws_cache_ttl_lru *_c, const char *specific_key,
		   const void **pdata, size_t *psize)
{
	lws_cache_ttl_lru_t_heap_t *cache = (lws_cache_ttl_lru_t_heap_t *)_c;
	lws_cache_ttl_item_heap_t *item;

	item = lws_cache_heap_specific(cache, specific_key);
	if (!item)
		return 1;

	/* we are using it, move it to lru head */
	lws_dll2_remove(&item->list_lru);
	lws_dll2_add_head(&item->list_lru, &cache->items_lru);

	if (pdata) {
		*pdata = (const void *)&item[1];
		*psize = item->size;
	}

	return 0;
}

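/*
 * Remove specific_key from this cache level if it is present, first dropping
 * any cached meta-results whose wildcard key matches it, since they would
 * otherwise go stale.
 */
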
static int
lws_cache_heap_invalidate(struct lws_cache_ttl_lru *_c, const char *specific_key)
{
	lws_cache_ttl_lru_t_heap_t *cache = (lws_cache_ttl_lru_t_heap_t *)_c;
	struct lws_cache_ttl_lru *backing = _c;
	lws_cache_ttl_item_heap_t *item;
	const void *user;
	size_t size;

	if (lws_cache_heap_get(_c, specific_key, &user, &size))
		return 0;

	if (backing->info.parent)
		backing = backing->info.parent;

	item = (lws_cache_ttl_item_heap_t *)(((uint8_t *)user) - sizeof(*item));

	/*
	 * We must invalidate any cached results that would have included this
	 */

	lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1,
				   cache->items_lru.head) {
		lws_cache_ttl_item_heap_t *i = lws_container_of(d,
						lws_cache_ttl_item_heap_t,
						list_lru);
		const char *iname = ((const char *)&i[1]) + i->size;

		if (*iname == META_ITEM_LEADING) {

			/*
			 * If the item being invalidated matches any cached
			 * results, we have to invalidate those too.  To check
			 * this, we have to use the matching rules at the
			 * backing store level
			 */

			if (!backing->info.ops->tag_match(backing, iname + 1,
							  specific_key, 1))
				_lws_cache_heap_item_destroy(cache, i);
		}

	} lws_end_foreach_dll_safe(d, d1);

	lws_cache_heap_item_destroy(cache, item, 0);

	return 0;
}

static struct lws_cache_ttl_lru *
lws_cache_heap_create(const struct lws_cache_creation_info *info)
{
	lws_cache_ttl_lru_t_heap_t *cache;

	assert(info->cx);
	assert(info->name);

	cache = lws_fi(&info->cx->fic, "cache_createfail") ? NULL :
					lws_zalloc(sizeof(*cache), __func__);
	if (!cache)
		return NULL;

	cache->cache.info = *info;
	if (info->parent)
		info->parent->child = &cache->cache;

	// lwsl_cache("%s: create %s\n", __func__, info->name);

	return (struct lws_cache_ttl_lru *)cache;
}

static int
destroy_dll(struct lws_dll2 *d, void *user)
{
	lws_cache_ttl_lru_t *_c = (struct lws_cache_ttl_lru *)user;
	lws_cache_ttl_lru_t_heap_t *cache = (lws_cache_ttl_lru_t_heap_t *)_c;
	lws_cache_ttl_item_heap_t *item =
		       lws_container_of(d, lws_cache_ttl_item_heap_t, list_lru);

	lws_cache_heap_item_destroy(cache, item, 0);

	return 0;
}

static int
lws_cache_heap_expunge(struct lws_cache_ttl_lru *_c)
{
	lws_cache_ttl_lru_t_heap_t *cache = (lws_cache_ttl_lru_t_heap_t *)_c;

	lws_dll2_foreach_safe(&cache->items_lru, cache, destroy_dll);

	return 0;
}

static void
lws_cache_heap_destroy(struct lws_cache_ttl_lru **_cache)
{
	lws_cache_ttl_lru_t *c = *_cache;
	lws_cache_ttl_lru_t_heap_t *cache = (lws_cache_ttl_lru_t_heap_t *)c;

	if (!cache)
		return;

	lws_sul_cancel(&c->sul);

	lws_dll2_foreach_safe(&cache->items_lru, cache, destroy_dll);

	lws_free_set_NULL(*_cache);
}

#if defined(_DEBUG)
static int
dump_dll(struct lws_dll2 *d, void *user)
{
	lws_cache_ttl_item_heap_t *item =
		       lws_container_of(d, lws_cache_ttl_item_heap_t, list_lru);

	lwsl_cache("  %s: size %d, exp %llu\n",
		   (const char *)&item[1] + item->size,
		   (int)item->size, (unsigned long long)item->expiry);

	lwsl_hexdump_cache((const char *)&item[1], item->size);

	return 0;
}

static void
lws_cache_heap_debug_dump(struct lws_cache_ttl_lru *_c)
{
	lws_cache_ttl_lru_t_heap_t *cache = (lws_cache_ttl_lru_t_heap_t *)_c;
#if !defined(LWS_WITH_NO_LOGS)
	lws_cache_ttl_item_heap_t *item = NULL;

	lws_dll2_t *d = cache->items_expiry.head;

	if (d)
		item = lws_container_of(d, lws_cache_ttl_item_heap_t,
						list_expiry);

	lwsl_cache("%s: %s: items %d, earliest %llu\n", __func__,
			cache->cache.info.name, (int)cache->items_lru.count,
			item ? (unsigned long long)item->expiry : 0ull);
#endif

	lws_dll2_foreach_safe(&cache->items_lru, cache, dump_dll);
}
#endif

const struct lws_cache_ops lws_cache_ops_heap = {
	.create			= lws_cache_heap_create,
	.destroy		= lws_cache_heap_destroy,
	.expunge		= lws_cache_heap_expunge,

	.write			= lws_cache_heap_write,
	.tag_match		= lws_cache_heap_tag_match,
	.lookup			= lws_cache_heap_lookup,
	.invalidate		= lws_cache_heap_invalidate,
	.get			= lws_cache_heap_get,
#if defined(_DEBUG)
	.debug_dump		= lws_cache_heap_debug_dump,
#endif
};
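
/*
 * Illustrative usage sketch (kept as a comment, not compiled here): a heap
 * level cache is normally instantiated via the generic lws_cache_create()
 * API with .ops pointed at lws_cache_ops_heap exported above.  The info
 * members shown (cx, name, ops, max_items, max_footprint) are the ones this
 * file itself consumes; "context" and the concrete limit values are only
 * example assumptions.
 *
 *	struct lws_cache_creation_info ci;
 *	struct lws_cache_ttl_lru *l1;
 *
 *	memset(&ci, 0, sizeof(ci));
 *	ci.cx		 = context;
 *	ci.name		 = "l1";
 *	ci.ops		 = &lws_cache_ops_heap;
 *	ci.max_items	 = 1000;
 *	ci.max_footprint = 100 * 1024;
 *
 *	l1 = lws_cache_create(&ci);
 *	if (l1) {
 *		...
 *		lws_cache_destroy(&l1);
 *	}
 */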