// SPDX-License-Identifier: GPL-2.0
/* Shared Memory Communications Direct over ISM devices (SMC-D)
 *
 * Functions for ISM device.
 *
 * Copyright IBM Corp. 2018
 */

#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <asm/page.h>

#include "smc.h"
#include "smc_core.h"
#include "smc_ism.h"
#include "smc_pnet.h"

struct smcd_dev_list smcd_dev_list = {
	.list = LIST_HEAD_INIT(smcd_dev_list.list),
	.mutex = __MUTEX_INITIALIZER(smcd_dev_list.mutex)
};

bool smc_ism_v2_capable;

/* Test if an ISM communication is possible - same CPC */
int smc_ism_cantalk(u64 peer_gid, unsigned short vlan_id, struct smcd_dev *smcd)
{
	return smcd->ops->query_remote_gid(smcd, peer_gid, vlan_id ? 1 : 0,
					   vlan_id);
}

int smc_ism_write(struct smcd_dev *smcd, const struct smc_ism_position *pos,
		  void *data, size_t len)
{
	int rc;

	rc = smcd->ops->move_data(smcd, pos->token, pos->index, pos->signal,
				  pos->offset, data, len);

	return rc < 0 ? rc : 0;
}

void smc_ism_get_system_eid(struct smcd_dev *smcd, u8 **eid)
{
	smcd->ops->get_system_eid(smcd, eid);
}

u16 smc_ism_get_chid(struct smcd_dev *smcd)
{
	return smcd->ops->get_chid(smcd);
}

/* Set a connection using this DMBE. */
void smc_ism_set_conn(struct smc_connection *conn)
{
	unsigned long flags;

	spin_lock_irqsave(&conn->lgr->smcd->lock, flags);
	conn->lgr->smcd->conn[conn->rmb_desc->sba_idx] = conn;
	spin_unlock_irqrestore(&conn->lgr->smcd->lock, flags);
}

/* Unset a connection using this DMBE. */
void smc_ism_unset_conn(struct smc_connection *conn)
{
	unsigned long flags;

	if (!conn->rmb_desc)
		return;

	spin_lock_irqsave(&conn->lgr->smcd->lock, flags);
	conn->lgr->smcd->conn[conn->rmb_desc->sba_idx] = NULL;
	spin_unlock_irqrestore(&conn->lgr->smcd->lock, flags);
}

/* Register a VLAN identifier with the ISM device. Use a reference count
 * and add a VLAN identifier only when the first DMB using this VLAN is
 * registered.
 */
int smc_ism_get_vlan(struct smcd_dev *smcd, unsigned short vlanid)
{
	struct smc_ism_vlanid *new_vlan, *vlan;
	unsigned long flags;
	int rc = 0;

	if (!vlanid)			/* No valid vlan id */
		return -EINVAL;

	/* create new vlan entry, in case we need it */
	new_vlan = kzalloc(sizeof(*new_vlan), GFP_KERNEL);
	if (!new_vlan)
		return -ENOMEM;
	new_vlan->vlanid = vlanid;
	refcount_set(&new_vlan->refcnt, 1);

	/* if there is an existing entry, increase count and return */
	spin_lock_irqsave(&smcd->lock, flags);
	list_for_each_entry(vlan, &smcd->vlan, list) {
		if (vlan->vlanid == vlanid) {
			refcount_inc(&vlan->refcnt);
			kfree(new_vlan);
			goto out;
		}
	}

	/* no existing entry found.
	 * add new entry to device; might fail, e.g., if HW limit reached
	 */
	if (smcd->ops->add_vlan_id(smcd, vlanid)) {
		kfree(new_vlan);
		rc = -EIO;
		goto out;
	}
	list_add_tail(&new_vlan->list, &smcd->vlan);
out:
	spin_unlock_irqrestore(&smcd->lock, flags);
	return rc;
}
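/* Usage sketch (illustrative only, not called from this file): callers are
 * expected to pair smc_ism_get_vlan() with smc_ism_put_vlan() around the
 * lifetime of a DMB on a tagged VLAN, e.g.:
 *
 *	rc = smc_ism_get_vlan(smcd, vlanid);
 *	if (rc)
 *		return rc;
 *	...register and use DMBs on this VLAN...
 *	smc_ism_put_vlan(smcd, vlanid);
 */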
/* Unregister a VLAN identifier with the ISM device. Use a reference count
 * and remove a VLAN identifier only when the last DMB using this VLAN is
 * unregistered.
 */
int smc_ism_put_vlan(struct smcd_dev *smcd, unsigned short vlanid)
{
	struct smc_ism_vlanid *vlan;
	unsigned long flags;
	bool found = false;
	int rc = 0;

	if (!vlanid)			/* No valid vlan id */
		return -EINVAL;

	spin_lock_irqsave(&smcd->lock, flags);
	list_for_each_entry(vlan, &smcd->vlan, list) {
		if (vlan->vlanid == vlanid) {
			if (!refcount_dec_and_test(&vlan->refcnt))
				goto out;
			found = true;
			break;
		}
	}
	if (!found) {
		rc = -ENOENT;
		goto out;		/* VLAN id not in table */
	}

	/* Found and the last reference just gone */
	if (smcd->ops->del_vlan_id(smcd, vlanid))
		rc = -EIO;
	list_del(&vlan->list);
	kfree(vlan);
out:
	spin_unlock_irqrestore(&smcd->lock, flags);
	return rc;
}

int smc_ism_unregister_dmb(struct smcd_dev *smcd, struct smc_buf_desc *dmb_desc)
{
	struct smcd_dmb dmb;
	int rc = 0;

	if (!dmb_desc->dma_addr)
		return rc;

	memset(&dmb, 0, sizeof(dmb));
	dmb.dmb_tok = dmb_desc->token;
	dmb.sba_idx = dmb_desc->sba_idx;
	dmb.cpu_addr = dmb_desc->cpu_addr;
	dmb.dma_addr = dmb_desc->dma_addr;
	dmb.dmb_len = dmb_desc->len;
	rc = smcd->ops->unregister_dmb(smcd, &dmb);
	if (!rc || rc == ISM_ERROR) {
		dmb_desc->cpu_addr = NULL;
		dmb_desc->dma_addr = 0;
	}

	return rc;
}

int smc_ism_register_dmb(struct smc_link_group *lgr, int dmb_len,
			 struct smc_buf_desc *dmb_desc)
{
	struct smcd_dmb dmb;
	int rc;

	memset(&dmb, 0, sizeof(dmb));
	dmb.dmb_len = dmb_len;
	dmb.sba_idx = dmb_desc->sba_idx;
	dmb.vlan_id = lgr->vlan_id;
	dmb.rgid = lgr->peer_gid;
	rc = lgr->smcd->ops->register_dmb(lgr->smcd, &dmb);
	if (!rc) {
		dmb_desc->sba_idx = dmb.sba_idx;
		dmb_desc->token = dmb.dmb_tok;
		dmb_desc->cpu_addr = dmb.cpu_addr;
		dmb_desc->dma_addr = dmb.dma_addr;
		dmb_desc->len = dmb.dmb_len;
	}
	return rc;
}

struct smc_ism_event_work {
	struct work_struct work;
	struct smcd_dev *smcd;
	struct smcd_event event;
};

#define ISM_EVENT_REQUEST		0x0001
#define ISM_EVENT_RESPONSE		0x0002
#define ISM_EVENT_REQUEST_IR		0x00000001
#define ISM_EVENT_CODE_SHUTDOWN		0x80
#define ISM_EVENT_CODE_TESTLINK		0x83

union smcd_sw_event_info {
	u64 info;
	struct {
		u8		uid[SMC_LGR_ID_SIZE];
		unsigned short	vlan_id;
		u16		code;
	};
};

static void smcd_handle_sw_event(struct smc_ism_event_work *wrk)
{
	union smcd_sw_event_info ev_info;

	ev_info.info = wrk->event.info;
	switch (wrk->event.code) {
	case ISM_EVENT_CODE_SHUTDOWN:	/* Peer shut down DMBs */
		smc_smcd_terminate(wrk->smcd, wrk->event.tok, ev_info.vlan_id);
		break;
	case ISM_EVENT_CODE_TESTLINK:	/* Activity timer */
		if (ev_info.code == ISM_EVENT_REQUEST) {
			ev_info.code = ISM_EVENT_RESPONSE;
			wrk->smcd->ops->signal_event(wrk->smcd,
						     wrk->event.tok,
						     ISM_EVENT_REQUEST_IR,
						     ISM_EVENT_CODE_TESTLINK,
						     ev_info.info);
		}
		break;
	}
}

int smc_ism_signal_shutdown(struct smc_link_group *lgr)
{
	int rc;
	union smcd_sw_event_info ev_info;

	if (lgr->peer_shutdown)
		return 0;

	memcpy(ev_info.uid, lgr->id, SMC_LGR_ID_SIZE);
	ev_info.vlan_id = lgr->vlan_id;
	ev_info.code = ISM_EVENT_REQUEST;
	rc = lgr->smcd->ops->signal_event(lgr->smcd, lgr->peer_gid,
					  ISM_EVENT_REQUEST_IR,
					  ISM_EVENT_CODE_SHUTDOWN,
					  ev_info.info);
	return rc;
}
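/* Layout sketch of the software event info built above (assuming
 * SMC_LGR_ID_SIZE == 4 and no padding inside the anonymous struct of
 * union smcd_sw_event_info):
 *
 *	bytes 0..3: uid     - link group id of the sending side
 *	bytes 4..5: vlan_id - VLAN of the affected link group
 *	bytes 6..7: code    - ISM_EVENT_REQUEST or ISM_EVENT_RESPONSE
 *
 * The peer receives this value as event->info of a software-defined event
 * and decodes it in its smcd_handle_sw_event().
 */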
/* worker for SMC-D events */
static void smc_ism_event_work(struct work_struct *work)
{
	struct smc_ism_event_work *wrk =
		container_of(work, struct smc_ism_event_work, work);

	switch (wrk->event.type) {
	case ISM_EVENT_GID:	/* GID event, token is peer GID */
		smc_smcd_terminate(wrk->smcd, wrk->event.tok, VLAN_VID_MASK);
		break;
	case ISM_EVENT_DMB:
		break;
	case ISM_EVENT_SWR:	/* Software defined event */
		smcd_handle_sw_event(wrk);
		break;
	}
	kfree(wrk);
}

static void smcd_release(struct device *dev)
{
	struct smcd_dev *smcd = container_of(dev, struct smcd_dev, dev);

	kfree(smcd->conn);
	kfree(smcd);
}

struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name,
				const struct smcd_ops *ops, int max_dmbs)
{
	struct smcd_dev *smcd;

	smcd = kzalloc(sizeof(*smcd), GFP_KERNEL);
	if (!smcd)
		return NULL;
	smcd->conn = kcalloc(max_dmbs, sizeof(struct smc_connection *),
			     GFP_KERNEL);
	if (!smcd->conn) {
		kfree(smcd);
		return NULL;
	}

	smcd->event_wq = alloc_ordered_workqueue("ism_evt_wq-%s",
						 WQ_MEM_RECLAIM, name);
	if (!smcd->event_wq) {
		kfree(smcd->conn);
		kfree(smcd);
		return NULL;
	}

	smcd->dev.parent = parent;
	smcd->dev.release = smcd_release;
	device_initialize(&smcd->dev);
	dev_set_name(&smcd->dev, name);
	smcd->ops = ops;
	if (smc_pnetid_by_dev_port(parent, 0, smcd->pnetid))
		smc_pnetid_by_table_smcd(smcd);

	spin_lock_init(&smcd->lock);
	spin_lock_init(&smcd->lgr_lock);
	INIT_LIST_HEAD(&smcd->vlan);
	INIT_LIST_HEAD(&smcd->lgr_list);
	init_waitqueue_head(&smcd->lgrs_deleted);
	return smcd;
}
EXPORT_SYMBOL_GPL(smcd_alloc_dev);

int smcd_register_dev(struct smcd_dev *smcd)
{
	int rc;

	mutex_lock(&smcd_dev_list.mutex);
	if (list_empty(&smcd_dev_list.list)) {
		u8 *system_eid = NULL;

		smc_ism_get_system_eid(smcd, &system_eid);
		if (system_eid[24] != '0' || system_eid[28] != '0')
			smc_ism_v2_capable = true;
	}
	/* sort list: devices without pnetid before devices with pnetid */
	if (smcd->pnetid[0])
		list_add_tail(&smcd->list, &smcd_dev_list.list);
	else
		list_add(&smcd->list, &smcd_dev_list.list);
	mutex_unlock(&smcd_dev_list.mutex);

	pr_warn_ratelimited("smc: adding smcd device %s with pnetid %.16s%s\n",
			    dev_name(&smcd->dev), smcd->pnetid,
			    smcd->pnetid_by_user ? " (user defined)" : "");

	rc = device_add(&smcd->dev);
	if (rc) {
		mutex_lock(&smcd_dev_list.mutex);
		list_del(&smcd->list);
		mutex_unlock(&smcd_dev_list.mutex);
	}

	return rc;
}
EXPORT_SYMBOL_GPL(smcd_register_dev);

void smcd_unregister_dev(struct smcd_dev *smcd)
{
	pr_warn_ratelimited("smc: removing smcd device %s\n",
			    dev_name(&smcd->dev));
	mutex_lock(&smcd_dev_list.mutex);
	list_del_init(&smcd->list);
	mutex_unlock(&smcd_dev_list.mutex);
	smcd->going_away = 1;
	smc_smcd_terminate_all(smcd);
	flush_workqueue(smcd->event_wq);
	destroy_workqueue(smcd->event_wq);

	device_del(&smcd->dev);
}
EXPORT_SYMBOL_GPL(smcd_unregister_dev);

void smcd_free_dev(struct smcd_dev *smcd)
{
	put_device(&smcd->dev);
}
EXPORT_SYMBOL_GPL(smcd_free_dev);
/* SMCD Device event handler. Called from ISM device interrupt handler.
 * Parameters are smcd device pointer,
 * - event->type (0 --> DMB, 1 --> GID),
 * - event->code (event code),
 * - event->tok (either DMB token when event type 0, or GID when event type 1)
 * - event->time (time of day)
 * - event->info (debug info).
 *
 * Context:
 * - Function called in IRQ context from ISM device driver event handler.
 */
void smcd_handle_event(struct smcd_dev *smcd, struct smcd_event *event)
{
	struct smc_ism_event_work *wrk;

	if (smcd->going_away)
		return;
	/* copy event to event work queue, and let it be handled there */
	wrk = kmalloc(sizeof(*wrk), GFP_ATOMIC);
	if (!wrk)
		return;
	INIT_WORK(&wrk->work, smc_ism_event_work);
	wrk->smcd = smcd;
	wrk->event = *event;
	queue_work(smcd->event_wq, &wrk->work);
}
EXPORT_SYMBOL_GPL(smcd_handle_event);

/* SMCD Device interrupt handler. Called from ISM device interrupt handler.
 * Parameters are smcd device pointer and DMB number. Find the connection and
 * schedule the tasklet for this connection.
 *
 * Context:
 * - Function called in IRQ context from ISM device driver IRQ handler.
 */
void smcd_handle_irq(struct smcd_dev *smcd, unsigned int dmbno)
{
	struct smc_connection *conn = NULL;
	unsigned long flags;

	spin_lock_irqsave(&smcd->lock, flags);
	conn = smcd->conn[dmbno];
	if (conn && !conn->killed)
		tasklet_schedule(&conn->rx_tsklet);
	spin_unlock_irqrestore(&smcd->lock, flags);
}
EXPORT_SYMBOL_GPL(smcd_handle_irq);

void __init smc_ism_init(void)
{
	smc_ism_v2_capable = false;
}
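/* Expected call sequence for an ISM device driver using the exported API
 * above (a sketch under assumed driver-side names; the "my_*" identifiers
 * and MY_MAX_DMBS are hypothetical and not part of this file):
 *
 *	smcd = smcd_alloc_dev(parent_dev, "my_ism0", &my_smcd_ops,
 *			      MY_MAX_DMBS);
 *	if (!smcd)
 *		goto err;
 *	rc = smcd_register_dev(smcd);
 *	if (rc)
 *		goto err_free;
 *
 *	// from the device's interrupt handler:
 *	smcd_handle_event(smcd, &event);	// device/firmware events
 *	smcd_handle_irq(smcd, dmbno);		// data arrived in a DMB
 *
 *	// on device removal:
 *	smcd_unregister_dev(smcd);
 *	smcd_free_dev(smcd);
 */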