Lines matching refs:cxt (cross-reference hits for the identifier cxt, the struct mtdoops_context pointer, in drivers/mtd/mtdoops.c; each entry shows the source line number followed by the matching line).

58 static void mark_page_used(struct mtdoops_context *cxt, int page)
60 set_bit(page, cxt->oops_page_used);
63 static void mark_page_unused(struct mtdoops_context *cxt, int page)
65 clear_bit(page, cxt->oops_page_used);
68 static int page_is_used(struct mtdoops_context *cxt, int page)
70 return test_bit(page, cxt->oops_page_used);
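
The three helpers above wrap the cxt->oops_page_used bitmap, which records which flash pages already hold a saved oops record. A minimal user-space sketch of the same bookkeeping, using plain C bit operations in place of the kernel's set_bit()/clear_bit()/test_bit(); the helpers keep the driver's names but drop the cxt argument, and the page count is illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

#define BITS_PER_WORD (sizeof(unsigned long) * CHAR_BIT)

/* One bit per flash page; a set bit means the page holds a saved record. */
static unsigned long *page_used;

static void mark_page_used(int page)
{
	page_used[page / BITS_PER_WORD] |= 1UL << (page % BITS_PER_WORD);
}

static void mark_page_unused(int page)
{
	page_used[page / BITS_PER_WORD] &= ~(1UL << (page % BITS_PER_WORD));
}

static int page_is_used(int page)
{
	return (page_used[page / BITS_PER_WORD] >> (page % BITS_PER_WORD)) & 1;
}

int main(void)
{
	int pages = 128;

	/* Round the bitmap up to whole words, much as the driver sizes its vmalloc(). */
	page_used = calloc((pages + BITS_PER_WORD - 1) / BITS_PER_WORD,
			   sizeof(unsigned long));
	if (!page_used)
		return 1;

	mark_page_used(5);
	printf("page 5 used: %d, page 6 used: %d\n",
	       page_is_used(5), page_is_used(6));
	mark_page_unused(5);
	printf("page 5 used after clear: %d\n", page_is_used(5));
	free(page_used);
	return 0;
}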
73 static int mtdoops_erase_block(struct mtdoops_context *cxt, int offset)
75 struct mtd_info *mtd = cxt->mtd;
96 mark_page_unused(cxt, page);
101 static void mtdoops_inc_counter(struct mtdoops_context *cxt)
103 cxt->nextpage++;
104 if (cxt->nextpage >= cxt->oops_pages)
105 cxt->nextpage = 0;
106 cxt->nextcount++;
107 if (cxt->nextcount == 0xffffffff)
108 cxt->nextcount = 0;
110 if (page_is_used(cxt, cxt->nextpage)) {
111 schedule_work(&cxt->work_erase);
116 cxt->nextpage, cxt->nextcount);
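
mtdoops_inc_counter() advances the circular log: nextpage wraps at oops_pages, nextcount skips the all-ones value that erased flash reads back as, and if the new target page still holds an old record the erase worker is scheduled. A simplified user-space model of that advance, where need_erase stands in for schedule_work(&cxt->work_erase):

#include <stdbool.h>
#include <stdio.h>

struct ctx {
	int nextpage;
	int oops_pages;
	unsigned int nextcount;
};

static bool page_is_used(struct ctx *c, int page)
{
	(void)c;
	return page == 3;	/* pretend page 3 still holds an old record */
}

static void inc_counter(struct ctx *c, bool *need_erase)
{
	c->nextpage++;
	if (c->nextpage >= c->oops_pages)
		c->nextpage = 0;	/* wrap around the circular log */
	c->nextcount++;
	if (c->nextcount == 0xffffffff)
		c->nextcount = 0;	/* all-ones means "erased", never use it as a count */

	/* If the target page still holds an old record it must be erased first. */
	*need_erase = page_is_used(c, c->nextpage);
}

int main(void)
{
	struct ctx c = { .nextpage = 2, .oops_pages = 4, .nextcount = 9 };
	bool need_erase;

	inc_counter(&c, &need_erase);
	printf("next page %d, count %u, erase needed: %d\n",
	       c.nextpage, c.nextcount, need_erase);
	return 0;
}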
122 struct mtdoops_context *cxt =
124 struct mtd_info *mtd = cxt->mtd;
131 mod = (cxt->nextpage * record_size) % mtd->erasesize;
133 cxt->nextpage = cxt->nextpage + ((mtd->erasesize - mod) / record_size);
134 if (cxt->nextpage >= cxt->oops_pages)
135 cxt->nextpage = 0;
138 while ((ret = mtd_block_isbad(mtd, cxt->nextpage * record_size)) > 0) {
141 cxt->nextpage * record_size);
143 cxt->nextpage = cxt->nextpage + (mtd->erasesize / record_size);
144 if (cxt->nextpage >= cxt->oops_pages)
145 cxt->nextpage = 0;
146 if (i == cxt->oops_pages / (mtd->erasesize / record_size)) {
158 ret = mtdoops_erase_block(cxt, cxt->nextpage * record_size);
162 cxt->nextpage, cxt->nextcount);
167 ret = mtd_block_markbad(mtd, cxt->nextpage * record_size);
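
The erase worker first rounds nextpage up to an erase-block boundary, then steps over blocks that mtd_block_isbad() reports as bad, giving up after one full pass; a block whose erase keeps failing with -EIO is marked bad via mtd_block_markbad(). A self-contained sketch of just the alignment and skip arithmetic, with illustrative record and erase-block sizes:

#include <stdbool.h>
#include <stdio.h>

#define RECORD_SIZE	4096UL		/* illustrative; a module parameter in the driver */
#define ERASESIZE	65536UL		/* illustrative erase-block size */

static bool block_isbad(unsigned long offset)
{
	return offset == 2 * ERASESIZE;	/* pretend the third erase block is bad */
}

int main(void)
{
	int nextpage = 17, oops_pages = 64, i = 0;
	unsigned long mod;

	/* Round nextpage up to the start of the next erase block, wrapping if needed. */
	mod = (nextpage * RECORD_SIZE) % ERASESIZE;
	if (mod != 0) {
		nextpage += (ERASESIZE - mod) / RECORD_SIZE;
		if (nextpage >= oops_pages)
			nextpage = 0;
	}

	/* Skip whole erase blocks that are marked bad; stop after one full pass. */
	while (block_isbad(nextpage * RECORD_SIZE)) {
		printf("bad block at %#lx\n", nextpage * RECORD_SIZE);
		i++;
		nextpage += ERASESIZE / RECORD_SIZE;
		if (nextpage >= oops_pages)
			nextpage = 0;
		if (i == oops_pages / (int)(ERASESIZE / RECORD_SIZE)) {
			printf("all blocks bad!\n");
			return 1;
		}
	}

	printf("erase block starting at page %d\n", nextpage);
	return 0;
}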
176 static void mtdoops_write(struct mtdoops_context *cxt, int panic)
178 struct mtd_info *mtd = cxt->mtd;
184 hdr = cxt->oops_buf;
185 hdr[0] = cxt->nextcount;
189 ret = mtd_panic_write(mtd, cxt->nextpage * record_size,
190 record_size, &retlen, cxt->oops_buf);
196 ret = mtd_write(mtd, cxt->nextpage * record_size,
197 record_size, &retlen, cxt->oops_buf);
201 cxt->nextpage * record_size, retlen, record_size, ret);
202 mark_page_used(cxt, cxt->nextpage);
203 memset(cxt->oops_buf, 0xff, record_size);
205 mtdoops_inc_counter(cxt);
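
mtdoops_write() fills in a small header in front of the dumped log (the record counter in hdr[0] plus a magic word), writes the whole record_size buffer with mtd_panic_write() in panic context or mtd_write() otherwise, marks the page used, and resets the buffer to 0xff. A user-space sketch of building and recycling such a record buffer; the magic value here is made up, not the driver's constant:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RECORD_SIZE		4096
#define MTDOOPS_HEADER_SIZE	8		/* two 32-bit words: counter + magic */
#define KERNMSG_MAGIC		0x4f4f5053u	/* "OOPS": made-up marker, not the driver's constant */

static uint8_t oops_buf[RECORD_SIZE];

/* Build one record: header first, log text after it, then "write" it out. */
static void write_record(uint32_t nextcount, const char *log)
{
	/* The counter lets the boot-time scanner order records; the magic word
	 * lets it tell real records from erased pages. */
	uint32_t hdr[2] = { nextcount, KERNMSG_MAGIC };

	memcpy(oops_buf, hdr, sizeof(hdr));
	strncpy((char *)oops_buf + MTDOOPS_HEADER_SIZE, log,
		RECORD_SIZE - MTDOOPS_HEADER_SIZE - 1);

	/* The driver would hand the whole buffer to mtd_write() or, in panic
	 * context, mtd_panic_write() here. */
	printf("record %u: %s\n", nextcount, (char *)oops_buf + MTDOOPS_HEADER_SIZE);

	/* Refill with 0xff (the erased-flash pattern) ready for the next dump. */
	memset(oops_buf, 0xff, RECORD_SIZE);
}

int main(void)
{
	write_record(1, "Oops: kernel NULL pointer dereference");
	return 0;
}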
210 struct mtdoops_context *cxt =
213 mtdoops_write(cxt, 0);
216 static void find_next_position(struct mtdoops_context *cxt)
218 struct mtd_info *mtd = cxt->mtd;
223 for (page = 0; page < cxt->oops_pages; page++) {
227 mark_page_used(cxt, page);
239 mark_page_unused(cxt, page);
258 cxt->nextpage = cxt->oops_pages - 1;
259 cxt->nextcount = 0;
262 cxt->nextpage = maxpos;
263 cxt->nextcount = maxcount;
266 mtdoops_inc_counter(cxt);
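
At attach time find_next_position() reads the header of every page, remembers the highest record counter seen, and resumes after that page; completely erased flash (every counter reads back 0xffffffff) parks on the last page so the following mtdoops_inc_counter() starts at page 0 with count 1. A sketch of that scan, ignoring the counter-wraparound corner cases the driver also handles:

#include <stdint.h>
#include <stdio.h>

#define OOPS_PAGES	8
#define BLANK		0xffffffffu	/* erased flash reads back all ones */

/* Simulated first header word of each page: the record counter, or BLANK. */
static const uint32_t counters[OOPS_PAGES] = {
	5, 6, 7, BLANK, BLANK, 2, 3, 4
};

int main(void)
{
	uint32_t maxcount = BLANK;
	int page, maxpos = 0, nextpage, nextcount;

	/* Remember the newest (highest-numbered) record seen so far. */
	for (page = 0; page < OOPS_PAGES; page++) {
		if (counters[page] == BLANK)
			continue;
		if (maxcount == BLANK || counters[page] > maxcount) {
			maxcount = counters[page];
			maxpos = page;
		}
	}

	if (maxcount == BLANK) {
		/* Empty flash: park on the last page so the next advance lands on page 0. */
		nextpage = OOPS_PAGES - 1;
		nextcount = 0;
	} else {
		nextpage = maxpos;
		nextcount = (int)maxcount;
	}

	printf("resume after page %d, count %d\n", nextpage, nextcount);
	return 0;
}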
272 struct mtdoops_context *cxt = container_of(dumper,
279 kmsg_dump_get_buffer(dumper, true, cxt->oops_buf + MTDOOPS_HEADER_SIZE,
284 mtdoops_write(cxt, 1);
287 schedule_work(&cxt->work_write);
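
mtdoops_do_dump() copies the kernel log into oops_buf past the header, then writes immediately when dumping from panic context (nothing may sleep there) and defers to the work_write workqueue otherwise. A trivial model of that dispatch; write_now() and queue_write() are stand-ins for the driver's two paths:

#include <stdbool.h>
#include <stdio.h>

static void write_now(void)   { puts("panic: writing record synchronously"); }
static void queue_write(void) { puts("oops: deferring write to the workqueue"); }

static void do_dump(bool in_panic)
{
	/* kmsg_dump_get_buffer() would fill the buffer past the header here. */
	if (in_panic)
		write_now();
	else
		queue_write();
}

int main(void)
{
	do_dump(true);
	do_dump(false);
	return 0;
}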
293 struct mtdoops_context *cxt = &oops_cxt;
298 cxt->mtd_index = mtd->index;
300 if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0)
320 cxt->oops_page_used =
324 if (!cxt->oops_page_used) {
329 cxt->dump.max_reason = KMSG_DUMP_OOPS;
330 cxt->dump.dump = mtdoops_do_dump;
331 err = kmsg_dump_register(&cxt->dump);
334 vfree(cxt->oops_page_used);
335 cxt->oops_page_used = NULL;
339 cxt->mtd = mtd;
340 cxt->oops_pages = (int)mtd->size / record_size;
341 find_next_position(cxt);
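
mtdoops_notify_add() claims the configured MTD device, allocates the oops_page_used bitmap, registers the kmsg dumper and derives the number of record slots from the partition size (oops_pages = mtd->size / record_size). A sketch of that sizing arithmetic, with an assumed 1 MiB partition and an assumed bitmap rounded up to whole longs:

#include <stdio.h>
#include <stddef.h>
#include <limits.h>

#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)

int main(void)
{
	unsigned long long mtd_size = 1 << 20;	/* illustrative 1 MiB partition */
	unsigned long record_size = 4096;	/* illustrative record size */
	int oops_pages = (int)(mtd_size / record_size);

	/* Bitmap needs one bit per record slot, rounded up to whole longs. */
	size_t bitmap_longs = (oops_pages + BITS_PER_LONG - 1) / BITS_PER_LONG;

	printf("%d record slots, bitmap of %zu longs (%zu bytes)\n",
	       oops_pages, bitmap_longs, bitmap_longs * sizeof(unsigned long));
	return 0;
}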
347 struct mtdoops_context *cxt = &oops_cxt;
349 if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0)
352 if (kmsg_dump_unregister(&cxt->dump) < 0)
355 cxt->mtd = NULL;
356 flush_work(&cxt->work_erase);
357 flush_work(&cxt->work_write);
368 struct mtdoops_context *cxt = &oops_cxt;
386 cxt->mtd_index = -1;
389 cxt->mtd_index = mtd_index;
391 cxt->oops_buf = vmalloc(record_size);
392 if (!cxt->oops_buf) {
396 memset(cxt->oops_buf, 0xff, record_size);
398 INIT_WORK(&cxt->work_erase, mtdoops_workfunc_erase);
399 INIT_WORK(&cxt->work_write, mtdoops_workfunc_write);
407 struct mtdoops_context *cxt = &oops_cxt;
410 vfree(cxt->oops_buf);
411 vfree(cxt->oops_page_used);