Lines Matching defs:resource
177 spin_lock_irq(&connection->resource->req_lock);
239 spin_unlock_irq(&connection->resource->req_lock);
244 spin_unlock_irq(&connection->resource->req_lock);
257 /* must hold resource->req_lock */
271 spin_lock_irq(&connection->resource->req_lock);
273 spin_unlock_irq(&connection->resource->req_lock);
298 spin_lock_irq(&connection->resource->req_lock);
306 spin_unlock_irq(&connection->resource->req_lock);
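
The matches at 177-306 above (and the irqsave pair at 1896/1909 further down) all bracket short critical sections with resource->req_lock. As a minimal sketch of the two locking flavours, with a stub struct standing in for struct drbd_resource and illustrative helper names that are not DRBD's:

#include <linux/spinlock.h>

struct res_locked {
        spinlock_t req_lock;
        /* request state protected by req_lock would live here */
};

/* Process context with interrupts known to be enabled: the plain
 * spin_lock_irq()/spin_unlock_irq() pairs seen at 177/239, 271/273, 298/306. */
static void update_requests(struct res_locked *r)
{
        spin_lock_irq(&r->req_lock);
        /* ... code that "must hold resource->req_lock" ... */
        spin_unlock_irq(&r->req_lock);
}

/* Callers that may already run with interrupts disabled save and restore the
 * IRQ state instead, as at 1896/1909. */
static void complete_request(struct res_locked *r)
{
        unsigned long flags;

        spin_lock_irqsave(&r->req_lock, flags);
        /* ... */
        spin_unlock_irqrestore(&r->req_lock, flags);
}
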
312 struct drbd_resource *resource = thi->resource;
318 resource->name);
338 drbd_info(resource, "Restarting %s thread\n", thi->name);
350 drbd_info(resource, "Terminating %s\n", current->comm);
356 kref_put(&resource->kref, drbd_destroy_resource);
361 static void drbd_thread_init(struct drbd_resource *resource, struct drbd_thread *thi,
368 thi->resource = resource;
375 struct drbd_resource *resource = thi->resource;
385 drbd_info(resource, "Starting %s thread (from %s [%d])\n",
390 drbd_err(resource, "Failed to get module reference in drbd_thread_start\n");
395 kref_get(&resource->kref);
406 "drbd_%c_%s", thi->name[0], thi->resource->name);
409 drbd_err(resource, "Couldn't start thread\n");
413 kref_put(&resource->kref, drbd_destroy_resource);
425 drbd_info(resource, "Restarting %s thread (from %s [%d])\n",
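
The thread lines above pair a kref_get() on the owning resource before the thread is spawned (395) with a kref_put() on the failure path (413) and again when the thread itself exits (356). A sketch of that ownership pattern, assuming a pared-down stand-in struct; only the kref and name fields come from the listing, the thread name format and helper names are illustrative:

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/kthread.h>
#include <linux/slab.h>

struct res_ref {
        struct kref kref;
        char *name;
};

/* Release callback for kref_put(), freeing what the listing frees at 2324-2325. */
static void res_ref_release(struct kref *kref)
{
        struct res_ref *r = container_of(kref, struct res_ref, kref);

        kfree(r->name);
        kfree(r);
}

static int res_thread_fn(void *data)
{
        struct res_ref *r = data;

        /* ... thread work ... */
        kref_put(&r->kref, res_ref_release);    /* drop the reference taken at start */
        return 0;
}

static int res_thread_start(struct res_ref *r)
{
        struct task_struct *task;

        kref_get(&r->kref);                     /* the thread pins its resource */
        task = kthread_run(res_thread_fn, r, "worker_%s", r->name);
        if (IS_ERR(task)) {
                kref_put(&r->kref, res_ref_release);    /* undo on failure */
                return PTR_ERR(task);
        }
        return 0;
}
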
492 * Forces all threads of a resource onto the same CPU. This is beneficial for
502 struct drbd_resource *resource;
506 for_each_resource_rcu(resource, &drbd_resources) {
507 for_each_cpu(cpu, resource->cpu_mask)
535 struct drbd_resource *resource = thi->resource;
541 set_cpus_allowed_ptr(p, resource->cpu_mask);
1896 spin_lock_irqsave(&device->resource->req_lock, flags);
1909 spin_unlock_irqrestore(&device->resource->req_lock, flags);
1924 /* need to hold resource->req_lock */
2196 struct drbd_resource *resource = device->resource;
2225 /* not for_each_connection(connection, resource):
2235 kref_put(&resource->kref, drbd_destroy_resource);
2289 * resource (replication group) or per device (minor) retry
2301 * holds resource->req_lock */
2319 struct drbd_resource *resource =
2322 idr_destroy(&resource->devices);
2323 free_cpumask_var(resource->cpu_mask);
2324 kfree(resource->name);
2325 kfree(resource);
2328 void drbd_free_resource(struct drbd_resource *resource)
2332 for_each_connection_safe(connection, tmp, resource) {
2337 drbd_debugfs_resource_cleanup(resource);
2338 kref_put(&resource->kref, drbd_destroy_resource);
2345 struct drbd_resource *resource, *tmp;
2367 for_each_resource_safe(resource, tmp, &drbd_resources) {
2368 list_del(&resource->resources);
2369 drbd_free_resource(resource);
2415 struct drbd_resource *resource;
2421 for_each_resource_rcu(resource, &drbd_resources) {
2422 if (!strcmp(resource->name, name)) {
2423 kref_get(&resource->kref);
2427 resource = NULL;
2430 return resource;
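
Lines 2415-2430 are the lookup by name: walk the RCU-protected resource list and take a reference inside the read-side critical section, so the returned pointer stays valid after rcu_read_unlock(). A self-contained sketch using the generic list_for_each_entry_rcu() where the listing uses DRBD's for_each_resource_rcu() wrapper; the list head and struct are stand-ins:

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/string.h>

struct res_entry {
        struct kref kref;
        char *name;
        struct list_head resources;     /* linked into the global list below */
};

static LIST_HEAD(res_list);             /* stand-in for drbd_resources */

static struct res_entry *res_lookup(const char *name)
{
        struct res_entry *r;

        rcu_read_lock();
        list_for_each_entry_rcu(r, &res_list, resources) {
                if (!strcmp(r->name, name)) {
                        kref_get(&r->kref);     /* caller now owns a reference */
                        goto found;
                }
        }
        r = NULL;
found:
        rcu_read_unlock();
        return r;
}
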
2436 struct drbd_resource *resource;
2440 for_each_resource_rcu(resource, &drbd_resources) {
2441 for_each_connection_rcu(connection, resource) {
2495 int set_resource_options(struct drbd_resource *resource, struct res_opts *res_opts)
2514 drbd_warn(resource, "Overflow in bitmap_parse(%.12s%s), truncating to %u bits\n",
2523 drbd_warn(resource, "bitmap_parse() failed with %d\n", err);
2528 resource->res_opts = *res_opts;
2531 if (!cpumask_equal(resource->cpu_mask, new_cpu_mask)) {
2532 cpumask_copy(resource->cpu_mask, new_cpu_mask);
2533 for_each_connection_rcu(connection, resource) {
2549 struct drbd_resource *resource;
2551 resource = kzalloc(sizeof(struct drbd_resource), GFP_KERNEL);
2552 if (!resource)
2554 resource->name = kstrdup(name, GFP_KERNEL);
2555 if (!resource->name)
2557 if (!zalloc_cpumask_var(&resource->cpu_mask, GFP_KERNEL))
2559 kref_init(&resource->kref);
2560 idr_init(&resource->devices);
2561 INIT_LIST_HEAD(&resource->connections);
2562 resource->write_ordering = WO_BDEV_FLUSH;
2563 list_add_tail_rcu(&resource->resources, &drbd_resources);
2564 mutex_init(&resource->conf_update);
2565 mutex_init(&resource->adm_mutex);
2566 spin_lock_init(&resource->req_lock);
2567 drbd_debugfs_resource_add(resource);
2568 return resource;
2571 kfree(resource->name);
2573 kfree(resource);
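
Lines 2549-2573 show the allocate-and-unwind shape of drbd_create_resource(): every allocation that can fail bails out to a cleanup path that frees whatever was already set up (2571, 2573). A trimmed sketch of that shape, keeping only fields visible in the listing; the label names and the reduced field set are assumptions:

#include <linux/cpumask.h>
#include <linux/idr.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>

struct res_min {
        struct kref kref;
        char *name;
        cpumask_var_t cpu_mask;
        struct idr devices;
        spinlock_t req_lock;
};

static struct res_min *res_create(const char *name)
{
        struct res_min *r;

        r = kzalloc(sizeof(*r), GFP_KERNEL);
        if (!r)
                return NULL;
        r->name = kstrdup(name, GFP_KERNEL);
        if (!r->name)
                goto fail_free_res;
        if (!zalloc_cpumask_var(&r->cpu_mask, GFP_KERNEL))
                goto fail_free_name;

        kref_init(&r->kref);
        idr_init(&r->devices);
        spin_lock_init(&r->req_lock);
        return r;

fail_free_name:
        kfree(r->name);
fail_free_res:
        kfree(r);
        return NULL;
}
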
2581 struct drbd_resource *resource;
2607 resource = drbd_create_resource(name);
2608 if (!resource)
2620 drbd_thread_init(resource, &connection->receiver, drbd_receiver, "receiver");
2622 drbd_thread_init(resource, &connection->worker, drbd_worker, "worker");
2624 drbd_thread_init(resource, &connection->ack_receiver, drbd_ack_receiver, "ack_recv");
2629 connection->resource = resource;
2631 if (set_resource_options(resource, res_opts))
2634 kref_get(&resource->kref);
2635 list_add_tail_rcu(&connection->connections, &resource->connections);
2640 list_del(&resource->resources);
2641 drbd_free_resource(resource);
2653 struct drbd_resource *resource = connection->resource;
2666 kref_put(&resource->kref, drbd_destroy_resource);
2685 struct drbd_resource *resource = adm_ctx->resource;
2704 kref_get(&resource->kref);
2705 device->resource = resource;
2751 id = idr_alloc(&resource->devices, device, vnr, vnr + 1, GFP_KERNEL);
2761 for_each_connection(connection, resource) {
2791 device->state.conn = first_connection(resource)->cstate;
2805 for_each_connection_safe(connection, n, resource) {
2814 idr_remove(&resource->devices, vnr);
2825 kref_put(&resource->kref, drbd_destroy_resource);
2832 struct drbd_resource *resource = device->resource;
2840 for_each_connection(connection, resource) {
2844 idr_remove(&resource->devices, device->vnr);
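
Line 2751 reserves exactly the requested minor: idr_alloc() with the range [vnr, vnr + 1) either returns vnr or fails when that slot is already taken, and the error paths at 2814/2844 undo it with idr_remove(). A small sketch of that reserve-exact-id idiom; mapping -ENOSPC to -EEXIST is an illustrative choice here, not necessarily what the driver reports:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/idr.h>

/* Try to claim id "vnr" for "device" in the given IDR. */
static int claim_minor(struct idr *devices, void *device, int vnr)
{
        int id;

        id = idr_alloc(devices, device, vnr, vnr + 1, GFP_KERNEL);
        if (id < 0)
                return id == -ENOSPC ? -EEXIST : id;    /* slot occupied */
        return 0;                                       /* id == vnr */
}
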
3291 spin_lock_irq(&device->resource->req_lock);
3298 spin_unlock_irq(&device->resource->req_lock);
3527 spin_lock_irq(&device->resource->req_lock);
3536 spin_unlock_irq(&device->resource->req_lock);
3697 spin_unlock_irq(&device->resource->req_lock);
3700 spin_lock_irq(&device->resource->req_lock);
3710 struct drbd_resource *resource;
3715 for_each_resource(resource, &drbd_resources)
3716 spin_lock_nested(&resource->req_lock, i++);
3721 struct drbd_resource *resource;
3723 for_each_resource(resource, &drbd_resources)
3724 spin_unlock(&resource->req_lock);
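
Lines 3710-3724 take every resource's req_lock in list order, passing an increasing lockdep subclass to spin_lock_nested() so lockdep accepts several locks of the same class being held at once; unlocking walks the list in the same order, which is fine for spinlocks. A sketch of just those two loops, using the generic list iterator in place of for_each_resource(); any serialization of the list itself and disabling of interrupts around the whole sequence is assumed to happen in the callers and is omitted:

#include <linux/list.h>
#include <linux/spinlock.h>

struct res_node {
        spinlock_t req_lock;
        struct list_head resources;
};

static LIST_HEAD(res_list);     /* stand-in for drbd_resources */

static void res_lock_all(void)
{
        struct res_node *r;
        int i = 0;

        list_for_each_entry(r, &res_list, resources)
                spin_lock_nested(&r->req_lock, i++);    /* fixed order, no ABBA */
}

static void res_unlock_all(void)
{
        struct res_node *r;

        list_for_each_entry(r, &res_list, resources)
                spin_unlock(&r->req_lock);
}
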