/*
 * The PCI Utilities -- Obtain the margin information of the Link
 *
 * Copyright (c) 2023 KNS Group LLC (YADRO)
 *
 * Can be freely distributed and used under the terms of the GNU GPL v2+.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include <errno.h>
#include <stdlib.h>
#include <time.h>

#include "lmr.h"

#ifdef PCI_OS_DJGPP
#include <unistd.h>
#endif

/* Macro helpers for Margining command parsing */

typedef u16 margin_cmd;

/* Margining command parsing */

#define LMR_CMD_RECVN   MASK(2, 0)
#define LMR_CMD_TYPE    MASK(5, 3)
#define LMR_CMD_PAYLOAD MASK(15, 8)

// Payload parsing

// Report Capabilities
#define LMR_PLD_VOLT_SUPPORT         BIT(8)
#define LMR_PLD_IND_U_D_VOLT         BIT(9)
#define LMR_PLD_IND_L_R_TIM          BIT(10)
#define LMR_PLD_SAMPLE_REPORT_METHOD BIT(11)
#define LMR_PLD_IND_ERR_SAMPLER      BIT(12)

#define LMR_PLD_MAX_T_STEPS MASK(13, 8)
#define LMR_PLD_MAX_V_STEPS MASK(14, 8)
#define LMR_PLD_MAX_OFFSET  MASK(14, 8)
#define LMR_PLD_MAX_LANES   MASK(12, 8)
#define LMR_PLD_SAMPLE_RATE MASK(13, 8)

// Timing Step
#define LMR_PLD_MARGIN_T_STEPS MASK(13, 8)
#define LMR_PLD_T_GO_LEFT      BIT(14)

// Voltage Step
#define LMR_PLD_MARGIN_V_STEPS MASK(14, 8)
#define LMR_PLD_V_GO_DOWN      BIT(15)

// Step Response
#define LMR_PLD_ERR_CNT    MASK(13, 8)
#define LMR_PLD_MARGIN_STS MASK(15, 14)

/* Address calc macro for Lanes Margining registers */

#define LMR_LANE_CTRL(lmr_cap_addr, lane)   ((lmr_cap_addr) + 8 + 4 * (lane))
#define LMR_LANE_STATUS(lmr_cap_addr, lane) ((lmr_cap_addr) + 10 + 4 * (lane))

/* Margining Commands */

#define MARG_TIM(go_left, step, recvn)  margin_make_cmd(((go_left) << 6) | (step), 3, recvn)
#define MARG_VOLT(go_down, step, recvn) margin_make_cmd(((go_down) << 7) | (step), 4, recvn)

// Report commands
#define REPORT_CAPS(recvn)         margin_make_cmd(0x88, 1, recvn)
#define REPORT_VOL_STEPS(recvn)    margin_make_cmd(0x89, 1, recvn)
#define REPORT_TIM_STEPS(recvn)    margin_make_cmd(0x8A, 1, recvn)
#define REPORT_TIM_OFFSET(recvn)   margin_make_cmd(0x8B, 1, recvn)
#define REPORT_VOL_OFFSET(recvn)   margin_make_cmd(0x8C, 1, recvn)
#define REPORT_SAMPL_RATE_V(recvn) margin_make_cmd(0x8D, 1, recvn)
#define REPORT_SAMPL_RATE_T(recvn) margin_make_cmd(0x8E, 1, recvn)
#define REPORT_SAMPLE_CNT(recvn)   margin_make_cmd(0x8F, 1, recvn)
#define REPORT_MAX_LANES(recvn)    margin_make_cmd(0x90, 1, recvn)

// Set commands
#define NO_COMMAND                          margin_make_cmd(0x9C, 7, 0)
#define CLEAR_ERROR_LOG(recvn)              margin_make_cmd(0x55, 2, recvn)
#define GO_TO_NORMAL_SETTINGS(recvn)        margin_make_cmd(0xF, 2, recvn)
#define SET_ERROR_LIMIT(error_limit, recvn) margin_make_cmd(0xC0 | (error_limit), 2, recvn)

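/* Sleep for msec milliseconds; portable wrapper around Sleep/usleep/nanosleep */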
static int
msleep(long msec)
{
#if defined(PCI_OS_WINDOWS)
  Sleep(msec);
  return 0;
#elif defined(PCI_OS_DJGPP)
  if (msec * 1000 < 11264)
    usleep(11264);
  else
    usleep(msec * 1000);
  return 0;
#else
  struct timespec ts;
  int res;

  if (msec < 0)
    {
      errno = EINVAL;
      return -1;
    }

  ts.tv_sec = msec / 1000;
  ts.tv_nsec = (msec % 1000) * 1000000;

  do
    {
      res = nanosleep(&ts, &ts);
    } while (res && errno == EINTR);

  return res;
#endif
}

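/* Pack payload, margin type and receiver number into a 16-bit Margining command */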
static margin_cmd
margin_make_cmd(u8 payload, u8 type, u8 recvn)
{
  return SET_REG_MASK(0, LMR_CMD_PAYLOAD, payload) | SET_REG_MASK(0, LMR_CMD_TYPE, type)
         | SET_REG_MASK(0, LMR_CMD_RECVN, recvn);
}

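/* Write a command to the Margining Lane Control register and check that the
   Margining Lane Status register echoes it back */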
static bool
margin_set_cmd(struct margin_dev *dev, u8 lane, margin_cmd cmd)
{
  pci_write_word(dev->dev, LMR_LANE_CTRL(dev->lmr_cap_addr, lane), cmd);
  msleep(10);
  return pci_read_word(dev->dev, LMR_LANE_STATUS(dev->lmr_cap_addr, lane)) == cmd;
}

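/* Issue a report command and read its response payload back from the Lane Status register */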
static bool
margin_report_cmd(struct margin_dev *dev, u8 lane, margin_cmd cmd, margin_cmd *result)
{
  pci_write_word(dev->dev, LMR_LANE_CTRL(dev->lmr_cap_addr, lane), cmd);
  msleep(10);
  *result = pci_read_word(dev->dev, LMR_LANE_STATUS(dev->lmr_cap_addr, lane));
  return GET_REG_MASK(*result, LMR_CMD_TYPE) == GET_REG_MASK(cmd, LMR_CMD_TYPE)
         && GET_REG_MASK(*result, LMR_CMD_RECVN) == GET_REG_MASK(cmd, LMR_CMD_RECVN)
         && margin_set_cmd(dev, lane, NO_COMMAND);
}

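/* Override reported parameters for hardware with known quirks (e.g. Ice Lake Root Complex) */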
static void
margin_apply_hw_quirks(struct margin_recv *recv)
{
  switch (recv->dev->hw)
    {
      case MARGIN_ICE_LAKE_RC:
        if (recv->recvn == 1)
          recv->params->volt_offset = 12;
        break;
      default:
        break;
    }
}

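/* Read the Margining capabilities of the given Receiver through a sequence of report commands */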
static bool
read_params_internal(struct margin_dev *dev, u8 recvn, bool lane_reversal,
                     struct margin_params *params)
{
  margin_cmd resp;
  u8 lane = lane_reversal ? dev->width - 1 : 0;
  margin_set_cmd(dev, lane, NO_COMMAND);
  bool status = margin_report_cmd(dev, lane, REPORT_CAPS(recvn), &resp);
  if (status)
    {
      params->volt_support = GET_REG_MASK(resp, LMR_PLD_VOLT_SUPPORT);
      params->ind_up_down_volt = GET_REG_MASK(resp, LMR_PLD_IND_U_D_VOLT);
      params->ind_left_right_tim = GET_REG_MASK(resp, LMR_PLD_IND_L_R_TIM);
      params->sample_report_method = GET_REG_MASK(resp, LMR_PLD_SAMPLE_REPORT_METHOD);
      params->ind_error_sampler = GET_REG_MASK(resp, LMR_PLD_IND_ERR_SAMPLER);
      status = margin_report_cmd(dev, lane, REPORT_VOL_STEPS(recvn), &resp);
    }
  if (status)
    {
      params->volt_steps = GET_REG_MASK(resp, LMR_PLD_MAX_V_STEPS);
      status = margin_report_cmd(dev, lane, REPORT_TIM_STEPS(recvn), &resp);
    }
  if (status)
    {
      params->timing_steps = GET_REG_MASK(resp, LMR_PLD_MAX_T_STEPS);
      status = margin_report_cmd(dev, lane, REPORT_TIM_OFFSET(recvn), &resp);
    }
  if (status)
    {
      params->timing_offset = GET_REG_MASK(resp, LMR_PLD_MAX_OFFSET);
      status = margin_report_cmd(dev, lane, REPORT_VOL_OFFSET(recvn), &resp);
    }
  if (status)
    {
      params->volt_offset = GET_REG_MASK(resp, LMR_PLD_MAX_OFFSET);
      status = margin_report_cmd(dev, lane, REPORT_SAMPL_RATE_V(recvn), &resp);
    }
  if (status)
    {
      params->sample_rate_v = GET_REG_MASK(resp, LMR_PLD_SAMPLE_RATE);
      status = margin_report_cmd(dev, lane, REPORT_SAMPL_RATE_T(recvn), &resp);
    }
  if (status)
    {
      params->sample_rate_t = GET_REG_MASK(resp, LMR_PLD_SAMPLE_RATE);
      status = margin_report_cmd(dev, lane, REPORT_MAX_LANES(recvn), &resp);
    }
  if (status)
    params->max_lanes = GET_REG_MASK(resp, LMR_PLD_MAX_LANES);
  return status;
}

/* Margin all lanes_n lanes simultaneously */
static void
margin_test_lanes(struct margin_lanes_data arg)
{
  u8 steps_done = 0;
  margin_cmd lane_status;
  u8 marg_type;
  margin_cmd step_cmd;
  bool timing = (arg.dir == TIM_LEFT || arg.dir == TIM_RIGHT);

  if (timing)
    {
      marg_type = 3;
      step_cmd = MARG_TIM(arg.dir == TIM_LEFT, steps_done, arg.recv->recvn);
    }
  else
    {
      marg_type = 4;
      step_cmd = MARG_VOLT(arg.dir == VOLT_DOWN, steps_done, arg.recv->recvn);
    }

  bool failed_lanes[32] = { 0 };
  u8 alive_lanes = arg.lanes_n;

  for (int i = 0; i < arg.lanes_n; i++)
    {
      margin_set_cmd(arg.recv->dev, arg.results[i].lane, NO_COMMAND);
      margin_set_cmd(arg.recv->dev, arg.results[i].lane,
                     SET_ERROR_LIMIT(arg.recv->error_limit, arg.recv->recvn));
      margin_set_cmd(arg.recv->dev, arg.results[i].lane, NO_COMMAND);
      arg.results[i].steps[arg.dir] = arg.steps_lane_total;
      arg.results[i].statuses[arg.dir] = MARGIN_THR;
    }

  while (alive_lanes > 0 && steps_done < arg.steps_lane_total)
    {
      alive_lanes = 0;
      steps_done++;
      if (timing)
        step_cmd = SET_REG_MASK(step_cmd, LMR_PLD_MARGIN_T_STEPS, steps_done);
      else
        step_cmd = SET_REG_MASK(step_cmd, LMR_PLD_MARGIN_V_STEPS, steps_done);

      for (int i = 0; i < arg.lanes_n; i++)
        {
          if (!failed_lanes[i])
            {
              alive_lanes++;
              int ctrl_addr = LMR_LANE_CTRL(arg.recv->dev->lmr_cap_addr, arg.results[i].lane);
              pci_write_word(arg.recv->dev->dev, ctrl_addr, step_cmd);
            }
        }
      msleep(MARGIN_STEP_MS);

      for (int i = 0; i < arg.lanes_n; i++)
        {
          if (!failed_lanes[i])
            {
              int status_addr = LMR_LANE_STATUS(arg.recv->dev->lmr_cap_addr, arg.results[i].lane);
              lane_status = pci_read_word(arg.recv->dev->dev, status_addr);
              u8 step_status = GET_REG_MASK(lane_status, LMR_PLD_MARGIN_STS);
              if (!(GET_REG_MASK(lane_status, LMR_CMD_TYPE) == marg_type
                    && GET_REG_MASK(lane_status, LMR_CMD_RECVN) == arg.recv->recvn
                    && step_status == 2
                    && GET_REG_MASK(lane_status, LMR_PLD_ERR_CNT) <= arg.recv->error_limit
                    && margin_set_cmd(arg.recv->dev, arg.results[i].lane, NO_COMMAND)))
                {
                  alive_lanes--;
                  failed_lanes[i] = true;
                  arg.results[i].steps[arg.dir] = steps_done - 1;
                  arg.results[i].statuses[arg.dir]
                    = (step_status == 3 || step_status == 1 ? MARGIN_NAK : MARGIN_LIM);
                }
            }
        }

      arg.steps_lane_done = steps_done;
      margin_log_margining(arg);
    }

  for (int i = 0; i < arg.lanes_n; i++)
    {
      margin_set_cmd(arg.recv->dev, arg.results[i].lane, NO_COMMAND);
      margin_set_cmd(arg.recv->dev, arg.results[i].lane, CLEAR_ERROR_LOG(arg.recv->recvn));
      margin_set_cmd(arg.recv->dev, arg.results[i].lane, NO_COMMAND);
      margin_set_cmd(arg.recv->dev, arg.results[i].lane, GO_TO_NORMAL_SETTINGS(arg.recv->recvn));
      margin_set_cmd(arg.recv->dev, arg.results[i].lane, NO_COMMAND);
    }
}

/* Expects that the Receiver has already been prepared through the prep_dev function */
static bool
margin_test_receiver(struct margin_dev *dev, u8 recvn, struct margin_args *args,
                     struct margin_results *results)
{
  u8 *lanes_to_margin = args->lanes;
  u8 lanes_n = args->lanes_n;

  struct margin_params params;
  struct margin_recv recv = { .dev = dev,
                              .recvn = recvn,
                              .lane_reversal = false,
                              .params = &params,
                              .parallel_lanes = args->parallel_lanes ? args->parallel_lanes : 1,
                              .error_limit = args->error_limit };

  results->recvn = recvn;
  results->lanes_n = lanes_n;
  margin_log_recvn(&recv);

  if (!margin_check_ready_bit(dev->dev))
    {
      margin_log("\nMargining Ready bit is Clear.\n");
      results->test_status = MARGIN_TEST_READY_BIT;
      return false;
    }

  if (!read_params_internal(dev, recvn, recv.lane_reversal, &params))
    {
      recv.lane_reversal = true;
      if (!read_params_internal(dev, recvn, recv.lane_reversal, &params))
        {
          margin_log("\nError during caps reading.\n");
          results->test_status = MARGIN_TEST_CAPS;
          return false;
        }
    }

  results->params = params;

  if (recv.parallel_lanes > params.max_lanes + 1)
    recv.parallel_lanes = params.max_lanes + 1;
  margin_apply_hw_quirks(&recv);
  margin_log_hw_quirks(&recv);

  results->tim_off_reported = params.timing_offset != 0;
  results->volt_off_reported = params.volt_offset != 0;
  double tim_offset = results->tim_off_reported ? (double)params.timing_offset : 50.0;
  double volt_offset = results->volt_off_reported ? (double)params.volt_offset : 50.0;

  results->tim_coef = tim_offset / (double)params.timing_steps;
  results->volt_coef = volt_offset / (double)params.volt_steps * 10.0;

  results->lane_reversal = recv.lane_reversal;
  results->link_speed = dev->link_speed;
  results->test_status = MARGIN_TEST_OK;

  margin_log_receiver(&recv);

  results->lanes = xmalloc(sizeof(struct margin_res_lane) * lanes_n);
  for (int i = 0; i < lanes_n; i++)
    {
      results->lanes[i].lane
        = recv.lane_reversal ? dev->width - lanes_to_margin[i] - 1 : lanes_to_margin[i];
    }

  if (args->run_margin)
    {
      if (args->verbosity > 0)
        margin_log("\n");
      struct margin_lanes_data lanes_data
        = { .recv = &recv, .verbosity = args->verbosity, .steps_utility = args->steps_utility };

      enum margin_dir dir[] = { TIM_LEFT, TIM_RIGHT, VOLT_UP, VOLT_DOWN };

      u8 lanes_done = 0;
      u8 use_lanes = 0;
      u8 steps_t = args->steps_t ? args->steps_t : params.timing_steps;
      u8 steps_v = args->steps_v ? args->steps_v : params.volt_steps;

      while (lanes_done != lanes_n)
        {
          use_lanes = (lanes_done + recv.parallel_lanes > lanes_n) ? lanes_n - lanes_done :
                                                                     recv.parallel_lanes;
          lanes_data.lanes_numbers = lanes_to_margin + lanes_done;
          lanes_data.lanes_n = use_lanes;
          lanes_data.results = results->lanes + lanes_done;

          for (int i = 0; i < 4; i++)
            {
              bool timing = dir[i] == TIM_LEFT || dir[i] == TIM_RIGHT;
              if (!timing && !params.volt_support)
                continue;
              if (dir[i] == TIM_RIGHT && !params.ind_left_right_tim)
                continue;
              if (dir[i] == VOLT_DOWN && !params.ind_up_down_volt)
                continue;

              lanes_data.ind = timing ? params.ind_left_right_tim : params.ind_up_down_volt;
              lanes_data.dir = dir[i];
              lanes_data.steps_lane_total = timing ? steps_t : steps_v;
              if (*args->steps_utility >= lanes_data.steps_lane_total)
                *args->steps_utility -= lanes_data.steps_lane_total;
              else
                *args->steps_utility = 0;
              margin_test_lanes(lanes_data);
            }
          lanes_done += use_lanes;
        }
      if (args->verbosity > 0)
        margin_log("\n");
      if (recv.lane_reversal)
        {
          for (int i = 0; i < lanes_n; i++)
            results->lanes[i].lane = lanes_to_margin[i];
        }
    }

  return true;
}

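/* Locate the Link partner of dev, prepare the Link, and read the margining parameters
   of Receiver recvn (0 selects the default Receiver for the port type) */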
bool
margin_read_params(struct pci_access *pacc, struct pci_dev *dev, u8 recvn,
                   struct margin_params *params)
{
  struct pci_cap *cap = pci_find_cap(dev, PCI_CAP_ID_EXP, PCI_CAP_NORMAL);
  if (!cap)
    return false;
  u8 dev_dir = GET_REG_MASK(pci_read_word(dev, cap->addr + PCI_EXP_FLAGS), PCI_EXP_FLAGS_TYPE);

  bool dev_down;
  if (dev_dir == PCI_EXP_TYPE_ROOT_PORT || dev_dir == PCI_EXP_TYPE_DOWNSTREAM)
    dev_down = true;
  else
    dev_down = false;

  if (recvn == 0)
    {
      if (dev_down)
        recvn = 1;
      else
        recvn = 6;
    }

  if (recvn > 6)
    return false;
  if (dev_down && recvn == 6)
    return false;
  if (!dev_down && recvn != 6)
    return false;

  struct pci_dev *down = NULL;
  struct pci_dev *up = NULL;
  struct margin_link link;

  for (struct pci_dev *p = pacc->devices; p; p = p->next)
    {
      if (dev_down && pci_read_byte(dev, PCI_SECONDARY_BUS) == p->bus && dev->domain == p->domain
          && p->func == 0)
        {
          down = dev;
          up = p;
          break;
        }
      else if (!dev_down && pci_read_byte(p, PCI_SECONDARY_BUS) == dev->bus
               && dev->domain == p->domain)
        {
          down = p;
          up = dev;
          break;
        }
    }

  if (!down)
    return false;

  if (!margin_fill_link(down, up, &link))
    return false;

  struct margin_dev *dut = (dev_down ? &link.down_port : &link.up_port);
  if (!margin_check_ready_bit(dut->dev))
    return false;

  if (!margin_prep_link(&link))
    return false;

  bool status;
  bool lane_reversal = false;
  status = read_params_internal(dut, recvn, lane_reversal, params);
  if (!status)
    {
      lane_reversal = true;
      status = read_params_internal(dut, recvn, lane_reversal, params);
    }

  margin_restore_link(&link);

  return status;
}

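/* Fill in default Receiver and lane lists when they are not specified
   and validate the user-provided ones */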
enum margin_test_status
margin_process_args(struct margin_dev *dev, struct margin_args *args)
{
  u8 receivers_n = 2 + 2 * dev->retimers_n;

  if (!args->recvs_n)
    {
      for (int i = 1; i < receivers_n; i++)
        args->recvs[i - 1] = i;
      args->recvs[receivers_n - 1] = 6;
      args->recvs_n = receivers_n;
    }
  else
    {
      for (int i = 0; i < args->recvs_n; i++)
        {
          u8 recvn = args->recvs[i];
          if (recvn < 1 || recvn > 6 || (recvn != 6 && recvn > receivers_n - 1))
            {
              return MARGIN_TEST_ARGS_RECVS;
            }
        }
    }

  if (!args->lanes_n)
    {
      args->lanes_n = dev->width;
      for (int i = 0; i < args->lanes_n; i++)
        args->lanes[i] = i;
    }
  else
    {
      for (int i = 0; i < args->lanes_n; i++)
        {
          if (args->lanes[i] >= dev->width)
            {
              return MARGIN_TEST_ARGS_LANES;
            }
        }
    }

  return MARGIN_TEST_OK;
}

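/* Margin all requested Receivers of the Link; returns a newly allocated array of
   per-Receiver results and stores its length in *recvs_n */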
struct margin_results *
margin_test_link(struct margin_link *link, struct margin_args *args, u8 *recvs_n)
{
  bool status = margin_prep_link(link);

  u8 receivers_n = status ? args->recvs_n : 1;
  u8 *receivers = args->recvs;

  margin_log_link(link);

  struct margin_results *results = xmalloc(sizeof(*results) * receivers_n);

  if (!status)
    {
      results[0].test_status = MARGIN_TEST_ASPM;
      margin_log("\nCouldn't disable ASPM on the given Link.\n");
    }

  if (status)
    {
      struct margin_dev *dut;
      for (int i = 0; i < receivers_n; i++)
        {
          dut = receivers[i] == 6 ? &link->up_port : &link->down_port;
          margin_test_receiver(dut, receivers[i], args, &results[i]);
        }

      margin_restore_link(link);
    }

  *recvs_n = receivers_n;
  return results;
}

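/* Free the per-lane arrays and the results array itself */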
void
margin_free_results(struct margin_results *results, u8 results_n)
{
  for (int i = 0; i < results_n; i++)
    {
      if (results[i].test_status == MARGIN_TEST_OK)
        free(results[i].lanes);
    }
  free(results);
}