]>
Commit | Line | Data |
---|---|---|
413d3347 MZ |
1 | // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB |
2 | /* | |
3 | * Copyright (c) 2019 Mellanox Technologies. All rights reserved. | |
4 | */ | |
5 | #include <rdma/ib_verbs.h> | |
6 | #include <rdma/rdma_counter.h> | |
7 | ||
8 | #include "core_priv.h" | |
9 | #include "restrack.h" | |
10 | ||
11 | #define ALL_AUTO_MODE_MASKS (RDMA_COUNTER_MASK_QP_TYPE) | |
12 | ||
13 | static int __counter_set_mode(struct rdma_counter_mode *curr, | |
14 | enum rdma_nl_counter_mode new_mode, | |
15 | enum rdma_nl_counter_mask new_mask) | |
16 | { | |
17 | if ((new_mode == RDMA_COUNTER_MODE_AUTO) && | |
18 | ((new_mask & (~ALL_AUTO_MODE_MASKS)) || | |
19 | (curr->mode != RDMA_COUNTER_MODE_NONE))) | |
20 | return -EINVAL; | |
21 | ||
22 | curr->mode = new_mode; | |
23 | curr->mask = new_mask; | |
24 | return 0; | |
25 | } | |
26 | ||
/**
 * rdma_counter_set_auto_mode() - Turn on/off per-port auto mode
 *
 * When @on is true, the @mask must be set; When @on is false, it goes
 * into manual mode if there's any counter, so that the user is able to
 * manually access them.
 */
int rdma_counter_set_auto_mode(struct ib_device *dev, u8 port,
			       bool on, enum rdma_nl_counter_mask mask)
{
	struct rdma_port_counter *port_counter;
	int ret;

	port_counter = &dev->port_data[port].port_counter;
	mutex_lock(&port_counter->lock);
	if (on) {
		/* __counter_set_mode() rejects the switch when a mode is
		 * already active or @mask carries non-auto bits.
		 */
		ret = __counter_set_mode(&port_counter->mode,
					 RDMA_COUNTER_MODE_AUTO, mask);
	} else {
		/* Only a port currently in auto mode can be switched off */
		if (port_counter->mode.mode != RDMA_COUNTER_MODE_AUTO) {
			ret = -EINVAL;
			goto out;
		}

		/* Keep existing counters reachable via manual mode;
		 * with no counters left, fall back to no counting at all.
		 */
		if (port_counter->num_counters)
			ret = __counter_set_mode(&port_counter->mode,
						 RDMA_COUNTER_MODE_MANUAL, 0);
		else
			ret = __counter_set_mode(&port_counter->mode,
						 RDMA_COUNTER_MODE_NONE, 0);
	}

out:
	mutex_unlock(&port_counter->lock);
	return ret;
}
63 | ||
/* Allocate and initialize a counter on @port of @dev for @mode.
 *
 * Requires the driver to implement both counter_dealloc and
 * counter_alloc_stats; otherwise no counter can be created.  For
 * manual mode the per-port mode is switched to MANUAL under the port
 * lock, which fails (and the allocation is rolled back) if the port is
 * currently in auto mode.  Returns the new counter or NULL on failure.
 */
static struct rdma_counter *rdma_counter_alloc(struct ib_device *dev, u8 port,
					       enum rdma_nl_counter_mode mode)
{
	struct rdma_port_counter *port_counter;
	struct rdma_counter *counter;
	int ret;

	if (!dev->ops.counter_dealloc || !dev->ops.counter_alloc_stats)
		return NULL;

	counter = kzalloc(sizeof(*counter), GFP_KERNEL);
	if (!counter)
		return NULL;

	counter->device = dev;
	counter->port = port;
	counter->res.type = RDMA_RESTRACK_COUNTER;
	/* Driver-provided hwstat buffer; owned by the counter from here on */
	counter->stats = dev->ops.counter_alloc_stats(counter);
	if (!counter->stats)
		goto err_stats;

	port_counter = &dev->port_data[port].port_counter;
	mutex_lock(&port_counter->lock);
	if (mode == RDMA_COUNTER_MODE_MANUAL) {
		/* Fails if the port is in auto mode - manual counters may
		 * not coexist with auto binding.
		 */
		ret = __counter_set_mode(&port_counter->mode,
					 RDMA_COUNTER_MODE_MANUAL, 0);
		if (ret)
			goto err_mode;
	}

	/* Account this counter against the port while still locked */
	port_counter->num_counters++;
	mutex_unlock(&port_counter->lock);

	counter->mode.mode = mode;
	kref_init(&counter->kref);
	mutex_init(&counter->lock);

	return counter;

err_mode:
	mutex_unlock(&port_counter->lock);
	kfree(counter->stats);
err_stats:
	kfree(counter);
	return NULL;
}
110 | ||
/* Release @counter and drop it from the per-port accounting.
 *
 * When the last counter on a manual-mode port goes away, the port mode
 * falls back to NONE (mirrors the switch done in rdma_counter_alloc()).
 */
static void rdma_counter_free(struct rdma_counter *counter)
{
	struct rdma_port_counter *port_counter;

	port_counter = &counter->device->port_data[counter->port].port_counter;
	mutex_lock(&port_counter->lock);
	port_counter->num_counters--;
	if (!port_counter->num_counters &&
	    (port_counter->mode.mode == RDMA_COUNTER_MODE_MANUAL))
		__counter_set_mode(&port_counter->mode, RDMA_COUNTER_MODE_NONE,
				   0);

	mutex_unlock(&port_counter->lock);

	/* Remove from restrack before freeing backing memory */
	rdma_restrack_del(&counter->res);
	kfree(counter->stats);
	kfree(counter);
}
129 | ||
130 | static void auto_mode_init_counter(struct rdma_counter *counter, | |
131 | const struct ib_qp *qp, | |
132 | enum rdma_nl_counter_mask new_mask) | |
133 | { | |
134 | struct auto_mode_param *param = &counter->mode.param; | |
135 | ||
136 | counter->mode.mode = RDMA_COUNTER_MODE_AUTO; | |
137 | counter->mode.mask = new_mask; | |
138 | ||
139 | if (new_mask & RDMA_COUNTER_MASK_QP_TYPE) | |
140 | param->qp_type = qp->qp_type; | |
141 | } | |
142 | ||
143 | static bool auto_mode_match(struct ib_qp *qp, struct rdma_counter *counter, | |
144 | enum rdma_nl_counter_mask auto_mask) | |
145 | { | |
146 | struct auto_mode_param *param = &counter->mode.param; | |
147 | bool match = true; | |
148 | ||
149 | if (rdma_is_kernel_res(&counter->res) != rdma_is_kernel_res(&qp->res)) | |
150 | return false; | |
151 | ||
152 | /* Ensure that counter belong to right PID */ | |
153 | if (!rdma_is_kernel_res(&counter->res) && | |
154 | !rdma_is_kernel_res(&qp->res) && | |
155 | (task_pid_vnr(counter->res.task) != current->pid)) | |
156 | return false; | |
157 | ||
158 | if (auto_mask & RDMA_COUNTER_MASK_QP_TYPE) | |
159 | match &= (param->qp_type == qp->qp_type); | |
160 | ||
161 | return match; | |
162 | } | |
163 | ||
164 | static int __rdma_counter_bind_qp(struct rdma_counter *counter, | |
165 | struct ib_qp *qp) | |
166 | { | |
167 | int ret; | |
168 | ||
169 | if (qp->counter) | |
170 | return -EINVAL; | |
171 | ||
172 | if (!qp->device->ops.counter_bind_qp) | |
173 | return -EOPNOTSUPP; | |
174 | ||
175 | mutex_lock(&counter->lock); | |
176 | ret = qp->device->ops.counter_bind_qp(counter, qp); | |
177 | mutex_unlock(&counter->lock); | |
178 | ||
179 | return ret; | |
180 | } | |
181 | ||
182 | static int __rdma_counter_unbind_qp(struct ib_qp *qp) | |
183 | { | |
184 | struct rdma_counter *counter = qp->counter; | |
185 | int ret; | |
186 | ||
187 | if (!qp->device->ops.counter_unbind_qp) | |
188 | return -EOPNOTSUPP; | |
189 | ||
190 | mutex_lock(&counter->lock); | |
191 | ret = qp->device->ops.counter_unbind_qp(qp); | |
192 | mutex_unlock(&counter->lock); | |
193 | ||
194 | return ret; | |
195 | } | |
196 | ||
f34a55e4 MZ |
197 | static void counter_history_stat_update(const struct rdma_counter *counter) |
198 | { | |
199 | struct ib_device *dev = counter->device; | |
200 | struct rdma_port_counter *port_counter; | |
201 | int i; | |
202 | ||
203 | port_counter = &dev->port_data[counter->port].port_counter; | |
204 | if (!port_counter->hstats) | |
205 | return; | |
206 | ||
207 | for (i = 0; i < counter->stats->num_counters; i++) | |
208 | port_counter->hstats->value[i] += counter->stats->value[i]; | |
209 | } | |
210 | ||
/**
 * rdma_get_counter_auto_mode - Find the counter that @qp should be bound
 *   with in auto mode
 *
 * Return: The counter (with ref-count increased) if found
 */
static struct rdma_counter *rdma_get_counter_auto_mode(struct ib_qp *qp,
						       u8 port)
{
	struct rdma_port_counter *port_counter;
	struct rdma_counter *counter = NULL;
	struct ib_device *dev = qp->device;
	struct rdma_restrack_entry *res;
	struct rdma_restrack_root *rt;
	unsigned long id = 0;

	port_counter = &dev->port_data[port].port_counter;
	rt = &dev->res[RDMA_RESTRACK_COUNTER];
	xa_lock(&rt->xa);
	xa_for_each(&rt->xa, id, res) {
		/* Skip counters not visible in the caller's pid namespace */
		if (!rdma_is_visible_in_pid_ns(res))
			continue;

		counter = container_of(res, struct rdma_counter, res);
		if ((counter->device != qp->device) || (counter->port != port))
			goto next;

		/* First counter matching the port's auto-mode mask wins */
		if (auto_mode_match(qp, counter, port_counter->mode.mask))
			break;
next:
		counter = NULL;
	}

	/* The matched counter may be in the middle of being released;
	 * only hand it out if a reference can still be taken.
	 */
	if (counter && !kref_get_unless_zero(&counter->kref))
		counter = NULL;

	xa_unlock(&rt->xa);
	return counter;
}
250 | ||
251 | static void rdma_counter_res_add(struct rdma_counter *counter, | |
252 | struct ib_qp *qp) | |
253 | { | |
254 | if (rdma_is_kernel_res(&qp->res)) { | |
255 | rdma_restrack_set_task(&counter->res, qp->res.kern_name); | |
256 | rdma_restrack_kadd(&counter->res); | |
257 | } else { | |
258 | rdma_restrack_attach_task(&counter->res, qp->res.task); | |
259 | rdma_restrack_uadd(&counter->res); | |
260 | } | |
261 | } | |
262 | ||
/* kref release callback for a counter.
 *
 * Order matters: flush the final hwstat values into the port history
 * first, then let the driver tear down its hardware object, and only
 * then free the software state.
 */
static void counter_release(struct kref *kref)
{
	struct rdma_counter *counter;

	counter = container_of(kref, struct rdma_counter, kref);
	counter_history_stat_update(counter);
	counter->device->ops.counter_dealloc(counter);
	rdma_counter_free(counter);
}
272 | ||
/**
 * rdma_counter_bind_qp_auto - Check and bind the QP to a counter base on
 *   the auto-mode rule
 *
 * In auto mode, the QP is attached to an existing matching counter when
 * one exists; otherwise a new auto-mode counter is allocated and bound.
 * Returns 0 (no-op) when the port is not in auto mode.
 */
int rdma_counter_bind_qp_auto(struct ib_qp *qp, u8 port)
{
	struct rdma_port_counter *port_counter;
	struct ib_device *dev = qp->device;
	struct rdma_counter *counter;
	int ret;

	if (!rdma_is_port_valid(dev, port))
		return -EINVAL;

	port_counter = &dev->port_data[port].port_counter;
	if (port_counter->mode.mode != RDMA_COUNTER_MODE_AUTO)
		return 0;

	/* Returned counter, if any, carries a reference for this QP */
	counter = rdma_get_counter_auto_mode(qp, port);
	if (counter) {
		ret = __rdma_counter_bind_qp(counter, qp);
		if (ret) {
			/* Drop the reference taken by the lookup */
			kref_put(&counter->kref, counter_release);
			return ret;
		}
	} else {
		counter = rdma_counter_alloc(dev, port, RDMA_COUNTER_MODE_AUTO);
		if (!counter)
			return -ENOMEM;

		auto_mode_init_counter(counter, qp, port_counter->mode.mask);

		ret = __rdma_counter_bind_qp(counter, qp);
		if (ret) {
			/* Not yet in restrack; free directly */
			rdma_counter_free(counter);
			return ret;
		}

		rdma_counter_res_add(counter, qp);
	}

	return 0;
}
316 | ||
317 | /** | |
318 | * rdma_counter_unbind_qp - Unbind a qp from a counter | |
319 | * @force: | |
320 | * true - Decrease the counter ref-count anyway (e.g., qp destroy) | |
321 | */ | |
322 | int rdma_counter_unbind_qp(struct ib_qp *qp, bool force) | |
323 | { | |
324 | struct rdma_counter *counter = qp->counter; | |
325 | int ret; | |
326 | ||
327 | if (!counter) | |
328 | return -EINVAL; | |
329 | ||
330 | ret = __rdma_counter_unbind_qp(qp); | |
331 | if (ret && !force) | |
332 | return ret; | |
333 | ||
334 | kref_put(&counter->kref, counter_release); | |
335 | return 0; | |
336 | } | |
337 | ||
c4ffee7c MZ |
338 | int rdma_counter_query_stats(struct rdma_counter *counter) |
339 | { | |
340 | struct ib_device *dev = counter->device; | |
341 | int ret; | |
342 | ||
343 | if (!dev->ops.counter_update_stats) | |
344 | return -EINVAL; | |
345 | ||
346 | mutex_lock(&counter->lock); | |
347 | ret = dev->ops.counter_update_stats(counter); | |
348 | mutex_unlock(&counter->lock); | |
349 | ||
350 | return ret; | |
351 | } | |
352 | ||
/* Sum hwstat @index across all live counters of @dev/@port.
 *
 * rdma_counter_query_stats() sleeps on the counter mutex, so the
 * xarray spinlock is dropped around it; the restrack reference taken
 * beforehand keeps the entry alive over that window.
 */
static u64 get_running_counters_hwstat_sum(struct ib_device *dev,
					   u8 port, u32 index)
{
	struct rdma_restrack_entry *res;
	struct rdma_restrack_root *rt;
	struct rdma_counter *counter;
	unsigned long id = 0;
	u64 sum = 0;

	rt = &dev->res[RDMA_RESTRACK_COUNTER];
	xa_lock(&rt->xa);
	xa_for_each(&rt->xa, id, res) {
		/* Skip entries already being torn down */
		if (!rdma_restrack_get(res))
			continue;

		xa_unlock(&rt->xa);

		counter = container_of(res, struct rdma_counter, res);
		/* Also skip counters on other devices/ports, or whose
		 * hwstat refresh failed.
		 */
		if ((counter->device != dev) || (counter->port != port) ||
		    rdma_counter_query_stats(counter))
			goto next;

		sum += counter->stats->value[index];

next:
		xa_lock(&rt->xa);
		rdma_restrack_put(res);
	}

	xa_unlock(&rt->xa);
	return sum;
}
385 | ||
386 | /** | |
387 | * rdma_counter_get_hwstat_value() - Get the sum value of all counters on a | |
388 | * specific port, including the running ones and history data | |
389 | */ | |
390 | u64 rdma_counter_get_hwstat_value(struct ib_device *dev, u8 port, u32 index) | |
391 | { | |
392 | struct rdma_port_counter *port_counter; | |
393 | u64 sum; | |
394 | ||
395 | port_counter = &dev->port_data[port].port_counter; | |
396 | sum = get_running_counters_hwstat_sum(dev, port, index); | |
397 | sum += port_counter->hstats->value[index]; | |
398 | ||
399 | return sum; | |
400 | } | |
401 | ||
1bd8e0a9 MZ |
402 | static struct ib_qp *rdma_counter_get_qp(struct ib_device *dev, u32 qp_num) |
403 | { | |
404 | struct rdma_restrack_entry *res = NULL; | |
405 | struct ib_qp *qp = NULL; | |
406 | ||
407 | res = rdma_restrack_get_byid(dev, RDMA_RESTRACK_QP, qp_num); | |
408 | if (IS_ERR(res)) | |
409 | return NULL; | |
410 | ||
411 | if (!rdma_is_visible_in_pid_ns(res)) | |
412 | goto err; | |
413 | ||
414 | qp = container_of(res, struct ib_qp, res); | |
415 | if (qp->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW)) | |
416 | goto err; | |
417 | ||
418 | return qp; | |
419 | ||
420 | err: | |
421 | rdma_restrack_put(&qp->res); | |
422 | return NULL; | |
423 | } | |
424 | ||
425 | static int rdma_counter_bind_qp_manual(struct rdma_counter *counter, | |
426 | struct ib_qp *qp) | |
427 | { | |
428 | if ((counter->device != qp->device) || (counter->port != qp->port)) | |
429 | return -EINVAL; | |
430 | ||
431 | return __rdma_counter_bind_qp(counter, qp); | |
432 | } | |
433 | ||
434 | static struct rdma_counter *rdma_get_counter_by_id(struct ib_device *dev, | |
435 | u32 counter_id) | |
436 | { | |
437 | struct rdma_restrack_entry *res; | |
438 | struct rdma_counter *counter; | |
439 | ||
440 | res = rdma_restrack_get_byid(dev, RDMA_RESTRACK_COUNTER, counter_id); | |
441 | if (IS_ERR(res)) | |
442 | return NULL; | |
443 | ||
444 | if (!rdma_is_visible_in_pid_ns(res)) { | |
445 | rdma_restrack_put(res); | |
446 | return NULL; | |
447 | } | |
448 | ||
449 | counter = container_of(res, struct rdma_counter, res); | |
450 | kref_get(&counter->kref); | |
451 | rdma_restrack_put(res); | |
452 | ||
453 | return counter; | |
454 | } | |
455 | ||
/**
 * rdma_counter_bind_qpn() - Bind QP @qp_num to counter @counter_id
 *
 * On success, the counter kref taken by the lookup is handed over to
 * the binding; on failure it is dropped.  The restrack reference on
 * the QP is always released before returning.
 */
int rdma_counter_bind_qpn(struct ib_device *dev, u8 port,
			  u32 qp_num, u32 counter_id)
{
	struct rdma_counter *counter;
	struct ib_qp *qp;
	int ret;

	qp = rdma_counter_get_qp(dev, qp_num);
	if (!qp)
		return -ENOENT;

	counter = rdma_get_counter_by_id(dev, counter_id);
	if (!counter) {
		ret = -ENOENT;
		goto err;
	}

	/* A counter may only be manually bound by its owning task */
	if (counter->res.task != qp->res.task) {
		ret = -EINVAL;
		goto err_task;
	}

	ret = rdma_counter_bind_qp_manual(counter, qp);
	if (ret)
		goto err_task;

	rdma_restrack_put(&qp->res);
	return 0;

err_task:
	kref_put(&counter->kref, counter_release);
err:
	rdma_restrack_put(&qp->res);
	return ret;
}
494 | ||
/**
 * rdma_counter_bind_qpn_alloc() - Alloc a counter and bind QP @qp_num to it
 * The id of new counter is returned in @counter_id
 */
int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u8 port,
				u32 qp_num, u32 *counter_id)
{
	struct rdma_counter *counter;
	struct ib_qp *qp;
	int ret;

	if (!rdma_is_port_valid(dev, port))
		return -EINVAL;

	qp = rdma_counter_get_qp(dev, qp_num);
	if (!qp)
		return -ENOENT;

	/* The QP, when port-associated, must match the requested port */
	if (rdma_is_port_valid(dev, qp->port) && (qp->port != port)) {
		ret = -EINVAL;
		goto err;
	}

	/* Allocation also flips the port into manual mode (or fails if
	 * it is in auto mode).
	 */
	counter = rdma_counter_alloc(dev, port, RDMA_COUNTER_MODE_MANUAL);
	if (!counter) {
		ret = -ENOMEM;
		goto err;
	}

	ret = rdma_counter_bind_qp_manual(counter, qp);
	if (ret)
		goto err_bind;

	if (counter_id)
		*counter_id = counter->id;

	/* Make the new counter visible via restrack only after binding */
	rdma_counter_res_add(counter, qp);

	rdma_restrack_put(&qp->res);
	return ret;

err_bind:
	rdma_counter_free(counter);
err:
	rdma_restrack_put(&qp->res);
	return ret;
}
542 | ||
/**
 * rdma_counter_unbind_qpn() - Unbind QP @qp_num from a counter
 *
 * Only valid when the port is in manual mode and the QP is currently
 * bound to the counter identified by @counter_id.
 */
int rdma_counter_unbind_qpn(struct ib_device *dev, u8 port,
			    u32 qp_num, u32 counter_id)
{
	struct rdma_port_counter *port_counter;
	struct ib_qp *qp;
	int ret;

	if (!rdma_is_port_valid(dev, port))
		return -EINVAL;

	qp = rdma_counter_get_qp(dev, qp_num);
	if (!qp)
		return -ENOENT;

	/* The QP, when port-associated, must match the requested port */
	if (rdma_is_port_valid(dev, qp->port) && (qp->port != port)) {
		ret = -EINVAL;
		goto out;
	}

	port_counter = &dev->port_data[port].port_counter;
	if (!qp->counter || qp->counter->id != counter_id ||
	    port_counter->mode.mode != RDMA_COUNTER_MODE_MANUAL) {
		ret = -EINVAL;
		goto out;
	}

	ret = rdma_counter_unbind_qp(qp, false);

out:
	rdma_restrack_put(&qp->res);
	return ret;
}
578 | ||
83c2c1fc MZ |
579 | int rdma_counter_get_mode(struct ib_device *dev, u8 port, |
580 | enum rdma_nl_counter_mode *mode, | |
581 | enum rdma_nl_counter_mask *mask) | |
582 | { | |
583 | struct rdma_port_counter *port_counter; | |
584 | ||
585 | port_counter = &dev->port_data[port].port_counter; | |
586 | *mode = port_counter->mode.mode; | |
587 | *mask = port_counter->mode.mask; | |
588 | ||
589 | return 0; | |
590 | } | |
591 | ||
/* Initialize per-port counter state of @dev: mode NONE, fresh lock,
 * and a history hwstat buffer per port.
 *
 * If any history allocation fails, hstats of ALL ports are freed and
 * NULLed (kfree(NULL) is a no-op for ports not yet reached), leaving
 * the device with no counter history support.
 */
void rdma_counter_init(struct ib_device *dev)
{
	struct rdma_port_counter *port_counter;
	u32 port;

	/* Counter support requires driver hwstats and port data */
	if (!dev->ops.alloc_hw_stats || !dev->port_data)
		return;

	rdma_for_each_port(dev, port) {
		port_counter = &dev->port_data[port].port_counter;
		port_counter->mode.mode = RDMA_COUNTER_MODE_NONE;
		mutex_init(&port_counter->lock);

		port_counter->hstats = dev->ops.alloc_hw_stats(dev, port);
		if (!port_counter->hstats)
			goto fail;
	}

	return;

fail:
	rdma_for_each_port(dev, port) {
		port_counter = &dev->port_data[port].port_counter;
		kfree(port_counter->hstats);
		port_counter->hstats = NULL;
	}

	return;
}
621 | ||
622 | void rdma_counter_release(struct ib_device *dev) | |
623 | { | |
f34a55e4 MZ |
624 | struct rdma_port_counter *port_counter; |
625 | u32 port; | |
626 | ||
627 | if (!dev->ops.alloc_hw_stats) | |
628 | return; | |
629 | ||
630 | rdma_for_each_port(dev, port) { | |
631 | port_counter = &dev->port_data[port].port_counter; | |
632 | kfree(port_counter->hstats); | |
633 | } | |
413d3347 | 634 | } |