/kernel/linux/linux-5.10/fs/ocfs2/dlm/

dlmrecovery.c
    107  mlog(0, "%s: changing dead_node from %u to %u\n",  in dlm_set_reco_dead_node()
    116  mlog(0, "%s: changing new_master from %u to %u\n",  in dlm_set_reco_master()
    153  mlog(0, "%s: work thread has %d work items\n", dlm->name, tot);  in dlm_dispatch_work()
    190  mlog(0, "starting dlm recovery thread...\n");  in dlm_launch_recovery_thread()
    206  mlog(0, "waiting for dlm recovery thread to exit\n");  in dlm_complete_recovery_thread()
    242  mlog(ML_NOTICE, "%s(%d): recovery info, state=%s, dead=%u, master=%u\n",  in dlm_print_reco_node_status()
    275  mlog(ML_NOTICE, "%s: reco state, node %u, state=%s\n",  in dlm_print_reco_node_status()
    279  mlog(ML_NOTICE, "%s: lockres %.*s on recovering list\n",  in dlm_print_reco_node_status()
    292  mlog(0, "dlm thread running for %s...\n", dlm->name);  in dlm_recovery_thread()
    310  mlog(  in dlm_recovery_thread()
    [all...]

dlmunlock.c
     96  mlog(0, "master_node = %d, valblk = %d\n", master_node,  in dlmunlock_common()
    110  mlog(ML_ERROR, "lockres %.*s: Someone is calling dlmunlock "  in dlmunlock_common()
    119  mlog(ML_ERROR, "lockres in progress!\n");  in dlmunlock_common()
    186  mlog(0, "%s:%.*s: clearing actions, %s\n",  in dlmunlock_common()
    218  mlog(0, "clearing convert_type at %smaster node\n",  in dlmunlock_common()
    251  mlog(0, "lock %u:%llu should be gone now! refs=%d\n",  in dlmunlock_common()
    323  mlog(0, "%.*s\n", res->lockname.len, res->lockname.name);  in dlm_send_remote_unlock_request()
    329  mlog(0, "%s:%.*s: this node became the master due to a "  in dlm_send_remote_unlock_request()
    357  mlog(0, "master was in-progress. retry\n");  in dlm_send_remote_unlock_request()
    360  mlog(ML_ERRO  in dlm_send_remote_unlock_request()
    [all...]

dlmmaster.c
    224  mlog(ML_ERROR, "bad mle: %p\n", mle);  in __dlm_put_mle()
    367  mlog(0, "node %u already removed from nodemap!\n", idx);  in dlm_mle_node_down()
    381  mlog(0, "node %u already in node map!\n", idx);  in dlm_mle_node_up()
    416  mlog(0, "Releasing mle for %.*s, type %d\n", mle->mnamelen, mle->mname,  in dlm_mle_release()
    478  mlog(0, "destroying lockres %.*s\n", res->lockname.len,  in dlm_lockres_release()
    490  mlog(ML_ERROR,  in dlm_lockres_release()
    608  mlog(0, "res %.*s, set node %u, %ps()\n", res->lockname.len,  in dlm_lockres_set_refmap_bit()
    619  mlog(0, "res %.*s, clr node %u, %ps()\n", res->lockname.len,  in dlm_lockres_clear_refmap_bit()
    630  mlog(0, "%s: res %.*s, inflight++: now %u, %ps()\n", dlm->name,  in __dlm_lockres_grab_inflight_ref()
    651  mlog(  in dlm_lockres_drop_inflight_ref()
    [all...]

dlmdomain.c
     84  mlog(0, "Allocated DLM hash pagevec; %d pages (%lu expected), %lu buckets per page\n",  in dlm_alloc_pagevec()
    152  mlog(0, "%s: Unhash res %.*s\n", dlm->name, res->lockname.len,  in __dlm_unhash_lockres()
    171  mlog(0, "%s: Hash res %.*s\n", dlm->name, res->lockname.len,  in __dlm_insert_lockres()
    183  mlog(0, "%.*s\n", len, name);  in __dlm_lookup_lockres_full()
    215  mlog(0, "%.*s\n", len, name);  in __dlm_lookup_lockres()
    323  mlog(0, "freeing memory from domain %s\n", dlm->name);  in dlm_ctxt_release()
    413  mlog(0, "Migrating locks from domain %s\n", dlm->name);  in dlm_migrate_all_locks()
    451  mlog(0, "%s: perhaps there are more lock resources "  in dlm_migrate_all_locks()
    455  mlog(0, "%s: we won't do dlm recovery after migrating "  in dlm_migrate_all_locks()
    467  mlog(  in dlm_migrate_all_locks()
    [all...]

dlmast.c
     88  mlog(ML_ERROR, "%s: res %.*s, lock %u:%llu, "  in __dlm_queue_ast()
     97  mlog(0, "%s: res %.*s, lock %u:%llu, AST getting flushed\n",  in __dlm_queue_ast()
    108  mlog(0, "%s: res %.*s, lock %u:%llu, Cancelling BAST\n",  in __dlm_queue_ast()
    155  mlog(0, "%s: res %.*s, lock %u:%llu, BAST getting flushed\n",  in __dlm_queue_bast()
    189  mlog(0, "getting lvb from lockres for %s node\n",  in dlm_update_lvb()
    213  mlog(0, "%s: res %.*s, lock %u:%llu, Local AST\n", dlm->name,  in dlm_do_local_ast()
    233  mlog(0, "%s: res %.*s, lock %u:%llu, Remote AST\n", dlm->name,  in dlm_do_remote_ast()
    257  mlog(0, "%s: res %.*s, lock %u:%llu, Local BAST, blocked %d\n",  in dlm_do_local_bast()
    299  mlog(ML_ERROR, "Invalid name length (%d) in proxy ast "  in dlm_proxy_ast_handler()
    306  mlog(ML_ERRO  in dlm_proxy_ast_handler()
    [all...]

dlmconvert.c
    115  mlog(0, "type=%d, convert_type=%d, new convert_type=%d\n",  in __dlmconvert_master()
    122  mlog(ML_ERROR, "attempted to convert a lock with a lock "  in __dlmconvert_master()
    130  mlog(ML_ERROR, "attempted to convert a lock not on grant "  in __dlmconvert_master()
    140  mlog(0, "will set lvb: converting %s->%s\n",  in __dlmconvert_master()
    149  mlog(0, "will fetch new value into "  in __dlmconvert_master()
    155  mlog(0, "will NOT fetch new value "  in __dlmconvert_master()
    190  mlog(0, "res %.*s, granting %s lock\n", res->lockname.len,  in __dlmconvert_master()
    195  mlog(0, "doing in-place convert for nonlocal lock\n");  in __dlmconvert_master()
    212  mlog(0, "failed to convert NOQUEUE lock %.*s from "  in __dlmconvert_master()
    218  mlog(  in __dlmconvert_master()
    [all...]

dlmlock.c
    110  mlog(0, "type=%d\n", lock->ml.type);  in dlmlock_master()
    127  mlog(0, "I can grant this lock right away\n");  in dlmlock_master()
    144  mlog(0, "%s: returning DLM_NORMAL to "  in dlmlock_master()
    155  mlog(0, "%s: returning NOTQUEUED to "  in dlmlock_master()
    206  mlog(0, "type=%d, lockres %.*s, flags = 0x%x\n",  in dlmlock_remote()
    242  mlog(0, "%s: recovery lock was owned by "  in dlmlock_remote()
    263  mlog(0, "%s: $RECOVERY lock for this node (%u) is "  in dlmlock_remote()
    306  mlog(ML_ERROR, "%s: res %.*s, Stale lockres no longer "  in dlm_send_remote_lock_request()
    314  mlog(ML_ERROR, "%s: res %.*s, Error %d send CREATE LOCK to "  in dlm_send_remote_lock_request()
    351  mlog(  in dlm_lock_release()
    [all...]

dlmthread.c
    116  mlog(0, "%s: Adding res %.*s to purge list\n",  in __dlm_lockres_calc_usage()
    125  mlog(0, "%s: Removing res %.*s from purge list\n",  in __dlm_lockres_calc_usage()
    159  mlog(0, "%s: Removing res %.*s from purgelist\n",  in __dlm_do_purge_lockres()
    167  mlog(ML_ERROR, "%s: res %.*s in use after deref\n",  in __dlm_do_purge_lockres()
    179  mlog(ML_ERROR, "%s: Resource %.*s not on the Tracking list\n",  in __dlm_do_purge_lockres()
    203  mlog(0, "%s: Purging res %.*s, master %d\n", dlm->name,  in dlm_purge_lockres()
    208  mlog(ML_NOTICE, "%s: res %.*s already in DLM_LOCK_RES_DROPPING_REF state\n",  in dlm_purge_lockres()
    235  mlog(0, "%s: Removing res %.*s from purgelist, master %d\n",  in dlm_purge_lockres()
    243  mlog(0, "%s: deref %.*s in progress\n",  in dlm_purge_lockres()
    250  mlog(ML_ERRO  in dlm_purge_lockres()
    [all...]
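A pattern worth noting across the dlm hits above: lock resource names are length-counted byte buffers, not NUL-terminated strings, so every message prints them with the "%.*s" precision pair, passing the length before the pointer (e.g. mlog(0, "%.*s\n", res->lockname.len, res->lockname.name) in dlm_send_remote_unlock_request). A minimal user-space sketch of the same idiom; the struct and values here are illustrative stand-ins, not the kernel's definitions:

    #include <stdio.h>

    /*
     * Illustrative stand-in for a length-counted lock name; the buffer
     * is deliberately NOT NUL-terminated, mirroring the DLM case.
     */
    struct lockname {
        int         len;
        const char *name;
    };

    int main(void)
    {
        /* Raw buffer with trailing junk and no terminating '\0'. */
        char raw[8] = { 'M', '0', '0', '0', '1', 'X', 'X', 'X' };
        struct lockname ln = { .len = 5, .name = raw };

        /*
         * "%.*s" consumes two arguments: a maximum length, then the
         * pointer. printf reads exactly ln.len bytes, so the missing
         * NUL is never touched.  Prints: lockres M0001
         */
        printf("lockres %.*s\n", ln.len, ln.name);
        return 0;
    }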
/kernel/linux/linux-6.6/fs/ocfs2/dlm/

dlmrecovery.c
    105  mlog(0, "%s: changing dead_node from %u to %u\n",  in dlm_set_reco_dead_node()
    114  mlog(0, "%s: changing new_master from %u to %u\n",  in dlm_set_reco_master()
    144  mlog(0, "%s: work thread has %d work items\n", dlm->name, tot);  in dlm_dispatch_work()
    181  mlog(0, "starting dlm recovery thread...\n");  in dlm_launch_recovery_thread()
    197  mlog(0, "waiting for dlm recovery thread to exit\n");  in dlm_complete_recovery_thread()
    233  mlog(ML_NOTICE, "%s(%d): recovery info, state=%s, dead=%u, master=%u\n",  in dlm_print_reco_node_status()
    266  mlog(ML_NOTICE, "%s: reco state, node %u, state=%s\n",  in dlm_print_reco_node_status()
    270  mlog(ML_NOTICE, "%s: lockres %.*s on recovering list\n",  in dlm_print_reco_node_status()
    283  mlog(0, "dlm thread running for %s...\n", dlm->name);  in dlm_recovery_thread()
    301  mlog(  in dlm_recovery_thread()
    [all...]

dlmunlock.c
     94  mlog(0, "master_node = %d, valblk = %d\n", master_node,  in dlmunlock_common()
    108  mlog(ML_ERROR, "lockres %.*s: Someone is calling dlmunlock "  in dlmunlock_common()
    117  mlog(ML_ERROR, "lockres in progress!\n");  in dlmunlock_common()
    184  mlog(0, "%s:%.*s: clearing actions, %s\n",  in dlmunlock_common()
    216  mlog(0, "clearing convert_type at %smaster node\n",  in dlmunlock_common()
    249  mlog(0, "lock %u:%llu should be gone now! refs=%d\n",  in dlmunlock_common()
    321  mlog(0, "%.*s\n", res->lockname.len, res->lockname.name);  in dlm_send_remote_unlock_request()
    327  mlog(0, "%s:%.*s: this node became the master due to a "  in dlm_send_remote_unlock_request()
    355  mlog(0, "master was in-progress. retry\n");  in dlm_send_remote_unlock_request()
    358  mlog(ML_ERRO  in dlm_send_remote_unlock_request()
    [all...]

dlmmaster.c
    222  mlog(ML_ERROR, "bad mle: %p\n", mle);  in __dlm_put_mle()
    365  mlog(0, "node %u already removed from nodemap!\n", idx);  in dlm_mle_node_down()
    379  mlog(0, "node %u already in node map!\n", idx);  in dlm_mle_node_up()
    414  mlog(0, "Releasing mle for %.*s, type %d\n", mle->mnamelen, mle->mname,  in dlm_mle_release()
    476  mlog(0, "destroying lockres %.*s\n", res->lockname.len,  in dlm_lockres_release()
    488  mlog(ML_ERROR,  in dlm_lockres_release()
    606  mlog(0, "res %.*s, set node %u, %ps()\n", res->lockname.len,  in dlm_lockres_set_refmap_bit()
    617  mlog(0, "res %.*s, clr node %u, %ps()\n", res->lockname.len,  in dlm_lockres_clear_refmap_bit()
    628  mlog(0, "%s: res %.*s, inflight++: now %u, %ps()\n", dlm->name,  in __dlm_lockres_grab_inflight_ref()
    649  mlog(  in dlm_lockres_drop_inflight_ref()
    [all...]

dlmdomain.c
     82  mlog(0, "Allocated DLM hash pagevec; %d pages (%lu expected), %lu buckets per page\n",  in dlm_alloc_pagevec()
    150  mlog(0, "%s: Unhash res %.*s\n", dlm->name, res->lockname.len,  in __dlm_unhash_lockres()
    169  mlog(0, "%s: Hash res %.*s\n", dlm->name, res->lockname.len,  in __dlm_insert_lockres()
    181  mlog(0, "%.*s\n", len, name);  in __dlm_lookup_lockres_full()
    213  mlog(0, "%.*s\n", len, name);  in __dlm_lookup_lockres()
    321  mlog(0, "freeing memory from domain %s\n", dlm->name);  in dlm_ctxt_release()
    411  mlog(0, "Migrating locks from domain %s\n", dlm->name);  in dlm_migrate_all_locks()
    449  mlog(0, "%s: perhaps there are more lock resources "  in dlm_migrate_all_locks()
    453  mlog(0, "%s: we won't do dlm recovery after migrating "  in dlm_migrate_all_locks()
    465  mlog(  in dlm_migrate_all_locks()
    [all...]

dlmconvert.c
    113  mlog(0, "type=%d, convert_type=%d, new convert_type=%d\n",  in __dlmconvert_master()
    120  mlog(ML_ERROR, "attempted to convert a lock with a lock "  in __dlmconvert_master()
    128  mlog(ML_ERROR, "attempted to convert a lock not on grant "  in __dlmconvert_master()
    138  mlog(0, "will set lvb: converting %s->%s\n",  in __dlmconvert_master()
    147  mlog(0, "will fetch new value into "  in __dlmconvert_master()
    153  mlog(0, "will NOT fetch new value "  in __dlmconvert_master()
    188  mlog(0, "res %.*s, granting %s lock\n", res->lockname.len,  in __dlmconvert_master()
    193  mlog(0, "doing in-place convert for nonlocal lock\n");  in __dlmconvert_master()
    210  mlog(0, "failed to convert NOQUEUE lock %.*s from "  in __dlmconvert_master()
    216  mlog(  in __dlmconvert_master()
    [all...]

dlmast.c
     86  mlog(ML_ERROR, "%s: res %.*s, lock %u:%llu, "  in __dlm_queue_ast()
     95  mlog(0, "%s: res %.*s, lock %u:%llu, AST getting flushed\n",  in __dlm_queue_ast()
    106  mlog(0, "%s: res %.*s, lock %u:%llu, Cancelling BAST\n",  in __dlm_queue_ast()
    153  mlog(0, "%s: res %.*s, lock %u:%llu, BAST getting flushed\n",  in __dlm_queue_bast()
    177  mlog(0, "getting lvb from lockres for %s node\n",  in dlm_update_lvb()
    201  mlog(0, "%s: res %.*s, lock %u:%llu, Local AST\n", dlm->name,  in dlm_do_local_ast()
    221  mlog(0, "%s: res %.*s, lock %u:%llu, Remote AST\n", dlm->name,  in dlm_do_remote_ast()
    245  mlog(0, "%s: res %.*s, lock %u:%llu, Local BAST, blocked %d\n",  in dlm_do_local_bast()
    287  mlog(ML_ERROR, "Invalid name length (%d) in proxy ast "  in dlm_proxy_ast_handler()
    294  mlog(ML_ERRO  in dlm_proxy_ast_handler()
    [all...]

dlmlock.c
    108  mlog(0, "type=%d\n", lock->ml.type);  in dlmlock_master()
    125  mlog(0, "I can grant this lock right away\n");  in dlmlock_master()
    142  mlog(0, "%s: returning DLM_NORMAL to "  in dlmlock_master()
    153  mlog(0, "%s: returning NOTQUEUED to "  in dlmlock_master()
    204  mlog(0, "type=%d, lockres %.*s, flags = 0x%x\n",  in dlmlock_remote()
    240  mlog(0, "%s: recovery lock was owned by "  in dlmlock_remote()
    261  mlog(0, "%s: $RECOVERY lock for this node (%u) is "  in dlmlock_remote()
    304  mlog(ML_ERROR, "%s: res %.*s, Stale lockres no longer "  in dlm_send_remote_lock_request()
    312  mlog(ML_ERROR, "%s: res %.*s, Error %d send CREATE LOCK to "  in dlm_send_remote_lock_request()
    349  mlog(  in dlm_lock_release()
    [all...]

dlmthread.c
    114  mlog(0, "%s: Adding res %.*s to purge list\n",  in __dlm_lockres_calc_usage()
    123  mlog(0, "%s: Removing res %.*s from purge list\n",  in __dlm_lockres_calc_usage()
    157  mlog(0, "%s: Removing res %.*s from purgelist\n",  in __dlm_do_purge_lockres()
    165  mlog(ML_ERROR, "%s: res %.*s in use after deref\n",  in __dlm_do_purge_lockres()
    177  mlog(ML_ERROR, "%s: Resource %.*s not on the Tracking list\n",  in __dlm_do_purge_lockres()
    201  mlog(0, "%s: Purging res %.*s, master %d\n", dlm->name,  in dlm_purge_lockres()
    206  mlog(ML_NOTICE, "%s: res %.*s already in DLM_LOCK_RES_DROPPING_REF state\n",  in dlm_purge_lockres()
    233  mlog(0, "%s: Removing res %.*s from purgelist, master %d\n",  in dlm_purge_lockres()
    241  mlog(0, "%s: deref %.*s in progress\n",  in dlm_purge_lockres()
    248  mlog(ML_ERRO  in dlm_purge_lockres()
    [all...]
/kernel/linux/linux-5.10/fs/ocfs2/cluster/

quorum.c
    104  mlog(0, "heartbeating: %d, connected: %d, "  in o2quo_make_decision()
    117  mlog(ML_ERROR, "fencing this node because it is "  in o2quo_make_decision()
    131  mlog(ML_ERROR, "fencing this node because it is "  in o2quo_make_decision()
    140  mlog(ML_ERROR, "fencing this node because it is "  in o2quo_make_decision()
    154  mlog(ML_NOTICE, "not fencing this node, heartbeating: %d, "  in o2quo_make_decision()
    172  mlog(0, "node %u, %d total\n", node, qs->qs_holds);  in o2quo_set_hold()
    181  mlog(0, "node %u, %d total\n", node, qs->qs_holds - 1);  in o2quo_clear_hold()
    209  mlog(0, "node %u, %d total\n", node, qs->qs_heartbeating);  in o2quo_hb_up()
    234  mlog(0, "node %u, %d total\n", node, qs->qs_heartbeating);  in o2quo_hb_down()
    252  mlog(  in o2quo_hb_still_up()
    [all...]

heartbeat.c
    292  mlog(ML_ERROR, "Heartbeat write timeout to device %s after %u "  in o2hb_write_timeout()
    306  mlog(ML_HEARTBEAT, "Number of regions %d, failed regions %d\n",  in o2hb_write_timeout()
    326  mlog(ML_HEARTBEAT, "Queue write timeout for %u ms\n",  in o2hb_arm_timeout()
    415  mlog(ML_HEARTBEAT, "send NEGO_APPROVE msg to node %d\n", i);  in o2hb_nego_timeout()
    419  mlog(ML_ERROR, "send NEGO_APPROVE msg to node %d fail %d\n",  in o2hb_nego_timeout()
    430  mlog(ML_ERROR, "send NEGO_TIMEOUT msg to node %d fail %d\n",  in o2hb_nego_timeout()
    447  mlog(ML_ERROR, "got nego timeout message from bad node.\n");  in o2hb_nego_timeout_handler()
    495  mlog(ML_ERROR, "IO Error %d\n", bio->bi_status);  in o2hb_bio_end_io()
    525  mlog(ML_ERROR, "Could not alloc slots BIO!\n");  in o2hb_setup_one_bio()
    545  mlog(ML_HB_BI  in o2hb_setup_one_bio()
    [all...]
/kernel/linux/linux-6.6/fs/ocfs2/cluster/

quorum.c
    102  mlog(0, "heartbeating: %d, connected: %d, "  in o2quo_make_decision()
    115  mlog(ML_ERROR, "fencing this node because it is "  in o2quo_make_decision()
    129  mlog(ML_ERROR, "fencing this node because it is "  in o2quo_make_decision()
    138  mlog(ML_ERROR, "fencing this node because it is "  in o2quo_make_decision()
    152  mlog(ML_NOTICE, "not fencing this node, heartbeating: %d, "  in o2quo_make_decision()
    170  mlog(0, "node %u, %d total\n", node, qs->qs_holds);  in o2quo_set_hold()
    179  mlog(0, "node %u, %d total\n", node, qs->qs_holds - 1);  in o2quo_clear_hold()
    207  mlog(0, "node %u, %d total\n", node, qs->qs_heartbeating);  in o2quo_hb_up()
    232  mlog(0, "node %u, %d total\n", node, qs->qs_heartbeating);  in o2quo_hb_down()
    250  mlog(  in o2quo_hb_still_up()
    [all...]

heartbeat.c
    288  mlog(ML_ERROR, "Heartbeat write timeout to device %pg after %u "  in o2hb_write_timeout()
    302  mlog(ML_HEARTBEAT, "Number of regions %d, failed regions %d\n",  in o2hb_write_timeout()
    322  mlog(ML_HEARTBEAT, "Queue write timeout for %u ms\n",  in o2hb_arm_timeout()
    411  mlog(ML_HEARTBEAT, "send NEGO_APPROVE msg to node %d\n", i);  in o2hb_nego_timeout()
    415  mlog(ML_ERROR, "send NEGO_APPROVE msg to node %d fail %d\n",  in o2hb_nego_timeout()
    426  mlog(ML_ERROR, "send NEGO_TIMEOUT msg to node %d fail %d\n",  in o2hb_nego_timeout()
    443  mlog(ML_ERROR, "got nego timeout message from bad node.\n");  in o2hb_nego_timeout_handler()
    491  mlog(ML_ERROR, "IO Error %d\n", bio->bi_status);  in o2hb_bio_end_io()
    520  mlog(ML_ERROR, "Could not alloc slots BIO!\n");  in o2hb_setup_one_bio()
    538  mlog(ML_HB_BI  in o2hb_setup_one_bio()
    [all...]
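Every mlog() call in these listings takes a mask as its first argument: 0 for ordinary debug chatter, or a category bit such as ML_ERROR, ML_NOTICE, ML_HEARTBEAT, or ML_BASTS that can be toggled at run time (the real definitions live in fs/ocfs2/cluster/masklog.h). Below is a rough user-space analogy of that mask filtering; the helper, the bit values, and the default mask are assumptions for illustration, not the kernel's masklog implementation:

    #include <stdio.h>
    #include <stdarg.h>

    /*
     * Illustrative mask bits, loosely modeled on the ML_* flags seen in
     * the hits above; the actual values in masklog.h differ.
     */
    #define ML_ERROR     0x1UL
    #define ML_NOTICE    0x2UL
    #define ML_HEARTBEAT 0x4UL
    #define ML_BASTS     0x8UL

    /* Run-time set of enabled classes (assumed default: errors and
     * notices on, everything else off). */
    static unsigned long mlog_mask = ML_ERROR | ML_NOTICE;

    /* Minimal stand-in for mlog(): print only when one of the
     * message's mask bits is currently enabled. */
    static void mlog(unsigned long mask, const char *fmt, ...)
    {
        va_list ap;

        if (!(mask & mlog_mask))
            return;
        va_start(ap, fmt);
        vprintf(fmt, ap);
        va_end(ap);
    }

    int main(void)
    {
        mlog(ML_ERROR, "fencing this node because it is unreachable\n");
        mlog(ML_HEARTBEAT, "Queue write timeout for %u ms\n", 2000u); /* filtered */

        mlog_mask |= ML_HEARTBEAT;  /* turn heartbeat tracing on */
        mlog(ML_HEARTBEAT, "Queue write timeout for %u ms\n", 2000u); /* printed */
        return 0;
    }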
/kernel/linux/linux-5.10/fs/ocfs2/dlmfs/

userdlm.c
     94  mlog(ML_ERROR, "Dlm error %d while calling %s on " \
    118  mlog(ML_BASTS, "AST fired for lockres %.*s, level %d => %d\n",  in user_ast()
    126  mlog(ML_ERROR, "lksb status value of %u on lockres %.*s\n",  in user_ast()
    205  mlog(ML_BASTS, "BAST fired for lockres %.*s, blocking %d, level %d\n",  in user_bast()
    223  mlog(ML_BASTS, "UNLOCK AST fired for lockres %.*s, flags 0x%x\n",  in user_unlock_ast()
    227  mlog(ML_ERROR, "dlm returns status %d\n", status);  in user_unlock_ast()
    293  mlog(0, "lockres %.*s\n", lockres->l_namelen, lockres->l_name);  in user_dlm_unblock_lock()
    311  mlog(ML_BASTS, "lockres %.*s USER_LOCK_BLOCKED\n",  in user_dlm_unblock_lock()
    318  mlog(ML_BASTS, "lockres %.*s USER_LOCK_IN_TEARDOWN\n",  in user_dlm_unblock_lock()
    326  mlog(ML_BAST  in user_dlm_unblock_lock()
    [all...]

dlmfs.c
    128  mlog(0, "open called on inode %lu, flags 0x%x\n", inode->i_ino,  in dlmfs_file_open()
    175  mlog(0, "close called on inode %lu\n", inode->i_ino);  in dlmfs_file_release()
    246  mlog(0, "inode %lu, count = %zu, *ppos = %llu\n",  in dlmfs_file_write()
    265  mlog(0, "wrote %zu bytes\n", count);  in dlmfs_file_write()
    303  mlog(0, "inode %lu\n", inode->i_ino);  in dlmfs_evict_inode()
    315  mlog(0, "we're a directory, ip->ip_conn = 0x%p\n", ip->ip_conn);  in dlmfs_evict_inode()
    408  mlog(0, "mkdir %.*s\n", domain->len, domain->name);  in dlmfs_mkdir()
    413  mlog(ML_ERROR, "invalid domain name for directory.\n");  in dlmfs_mkdir()
    429  mlog(ML_ERROR, "Error %d could not register domain \"%.*s\"\n",  in dlmfs_mkdir()
    455  mlog(  in dlmfs_create()
    [all...]
/kernel/linux/linux-6.6/fs/ocfs2/dlmfs/

userdlm.c
     92  mlog(ML_ERROR, "Dlm error %d while calling %s on " \
    116  mlog(ML_BASTS, "AST fired for lockres %.*s, level %d => %d\n",  in user_ast()
    124  mlog(ML_ERROR, "lksb status value of %u on lockres %.*s\n",  in user_ast()
    203  mlog(ML_BASTS, "BAST fired for lockres %.*s, blocking %d, level %d\n",  in user_bast()
    221  mlog(ML_BASTS, "UNLOCK AST fired for lockres %.*s, flags 0x%x\n",  in user_unlock_ast()
    225  mlog(ML_ERROR, "dlm returns status %d\n", status);  in user_unlock_ast()
    291  mlog(0, "lockres %.*s\n", lockres->l_namelen, lockres->l_name);  in user_dlm_unblock_lock()
    309  mlog(ML_BASTS, "lockres %.*s USER_LOCK_BLOCKED\n",  in user_dlm_unblock_lock()
    316  mlog(ML_BASTS, "lockres %.*s USER_LOCK_IN_TEARDOWN\n",  in user_dlm_unblock_lock()
    324  mlog(ML_BAST  in user_dlm_unblock_lock()
    [all...]
/kernel/linux/linux-5.10/fs/ocfs2/

super.c
    465  mlog(ML_ERROR, "Unable to load system inode %d, "  in ocfs2_init_global_system_inodes()
    494  mlog(ML_ERROR, "status=%d, sysfile=%d, slot=%d\n",  in ocfs2_init_local_system_inodes()
    626  mlog(ML_ERROR, "Cannot change heartbeat mode on remount\n");  in ocfs2_remount()
    633  mlog(ML_ERROR, "Cannot change data mode on remount\n");  in ocfs2_remount()
    642  mlog(ML_ERROR, "Cannot enable inode64 on remount\n");  in ocfs2_remount()
    658  mlog(ML_ERROR, "Remount on readonly device is forbidden.\n");  in ocfs2_remount()
    668  mlog(ML_ERROR, "Cannot remount RDWR "  in ocfs2_remount()
    675  mlog(ML_ERROR, "Cannot remount RDWR because "  in ocfs2_remount()
    739  mlog(ML_ERROR, "Hardware sector size too large: %d (max=%d)\n",  in ocfs2_sb_probe()
    757  mlog(ML_ERRO  in ocfs2_sb_probe()
    [all...]
/kernel/linux/linux-6.6/fs/ocfs2/

super.c
    462  mlog(ML_ERROR, "Unable to load system inode %d, "  in ocfs2_init_global_system_inodes()
    491  mlog(ML_ERROR, "status=%d, sysfile=%d, slot=%d\n",  in ocfs2_init_local_system_inodes()
    623  mlog(ML_ERROR, "Cannot change heartbeat mode on remount\n");  in ocfs2_remount()
    630  mlog(ML_ERROR, "Cannot change data mode on remount\n");  in ocfs2_remount()
    639  mlog(ML_ERROR, "Cannot enable inode64 on remount\n");  in ocfs2_remount()
    655  mlog(ML_ERROR, "Remount on readonly device is forbidden.\n");  in ocfs2_remount()
    665  mlog(ML_ERROR, "Cannot remount RDWR "  in ocfs2_remount()
    672  mlog(ML_ERROR, "Cannot remount RDWR because "  in ocfs2_remount()
    736  mlog(ML_ERROR, "Hardware sector size too large: %d (max=%d)\n",  in ocfs2_sb_probe()
    754  mlog(ML_ERRO  in ocfs2_sb_probe()
    [all...]
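The super.c hits share a validate-then-fail shape: each unsupported configuration or remount transition is rejected with a single ML_ERROR diagnostic naming both the observed and the permitted value (e.g. "Hardware sector size too large: %d (max=%d)"). A hedged user-space sketch of that pattern; the function name and the 4096-byte limit are hypothetical:

    #include <errno.h>
    #include <stdio.h>

    /*
     * Check-and-log pattern: validate one constraint, emit one
     * diagnostic carrying both the observed and the allowed value,
     * and fail early with a negative errno-style code.
     */
    static int check_sector_size(int hw_sector_size, int max)
    {
        if (hw_sector_size > max) {
            fprintf(stderr,
                "Hardware sector size too large: %d (max=%d)\n",
                hw_sector_size, max);
            return -EINVAL;
        }
        return 0;
    }

    int main(void)
    {
        /* 8192 exceeds the illustrative 4096 limit, so this fails. */
        return check_sector_size(8192, 4096) ? 1 : 0;
    }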