/*
 * SGI UltraViolet TLB flush routines.
 *
 * (c) 2008-2014 Cliff Wickman <cpw@sgi.com>, SGI.
 *
 * This code is released under the GNU General Public License version 2 or
 * later.
 */
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <asm/mmu_context.h>
#include <asm/uv/uv.h>
#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_bau.h>
#include <asm/irq_vectors.h>
#include <asm/timer.h>
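
/* BAU MMR access functions for this hub generation; chosen at init time (see uv_bau_init) */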
static struct bau_operations ops;

static struct bau_operations uv123_bau_ops = {
	.bau_gpa_to_offset	= uv_gpa_to_offset,
	.read_l_sw_ack		= read_mmr_sw_ack,
	.read_g_sw_ack		= read_gmmr_sw_ack,
	.write_l_sw_ack		= write_mmr_sw_ack,
	.write_g_sw_ack		= write_gmmr_sw_ack,
	.write_payload_first	= write_mmr_payload_first,
	.write_payload_last	= write_mmr_payload_last,
};

static struct bau_operations uv4_bau_ops = {
	.bau_gpa_to_offset	= uv_gpa_to_soc_phys_ram,
	.read_l_sw_ack		= read_mmr_proc_sw_ack,
	.read_g_sw_ack		= read_gmmr_proc_sw_ack,
	.write_l_sw_ack		= write_mmr_proc_sw_ack,
	.write_g_sw_ack		= write_gmmr_proc_sw_ack,
	.write_payload_first	= write_mmr_proc_payload_first,
	.write_payload_last	= write_mmr_proc_payload_last,
};
/* timeouts in nanoseconds (indexed by UVH_AGING_PRESCALE_SEL urgency7 30:28) */
static int timeout_base_ns[] = {

static int timeout_us;
static bool nobau = true;
static int nobau_perm;
static cycles_t congested_cycles;

static int max_concurr		= MAX_BAU_CONCURRENT;
static int max_concurr_const	= MAX_BAU_CONCURRENT;
static int plugged_delay	= PLUGGED_DELAY;
static int plugsb4reset		= PLUGSB4RESET;
static int giveup_limit		= GIVEUP_LIMIT;
static int timeoutsb4reset	= TIMEOUTSB4RESET;
static int ipi_reset_limit	= IPI_RESET_LIMIT;
static int complete_threshold	= COMPLETE_THRESHOLD;
static int congested_respns_us	= CONGESTED_RESPONSE_US;
static int congested_reps	= CONGESTED_REPS;
static int disabled_period	= DISABLED_PERIOD;
static struct tunables tunables[] = {
	{&max_concurr,		MAX_BAU_CONCURRENT}, /* must be [0] */
	{&plugged_delay,	PLUGGED_DELAY},
	{&plugsb4reset,		PLUGSB4RESET},
	{&timeoutsb4reset,	TIMEOUTSB4RESET},
	{&ipi_reset_limit,	IPI_RESET_LIMIT},
	{&complete_threshold,	COMPLETE_THRESHOLD},
	{&congested_respns_us,	CONGESTED_RESPONSE_US},
	{&congested_reps,	CONGESTED_REPS},
	{&disabled_period,	DISABLED_PERIOD},
	{&giveup_limit,		GIVEUP_LIMIT}
};

static struct dentry *tunables_dir;
static struct dentry *tunables_file;
/* these correspond to the statistics printed by ptc_seq_show() */
static char *stat_description[] = {
	"sent: number of shootdown messages sent",
	"stime: time spent sending messages",
	"numuvhubs: number of hubs targeted with shootdown",
	"numuvhubs16: number times 16 or more hubs targeted",
	"numuvhubs8: number times 8 or more hubs targeted",
	"numuvhubs4: number times 4 or more hubs targeted",
	"numuvhubs2: number times 2 or more hubs targeted",
	"numuvhubs1: number times 1 hub targeted",
	"numcpus: number of cpus targeted with shootdown",
	"dto: number of destination timeouts",
	"retries: destination timeout retries sent",
	"rok: destination timeouts successfully retried",
	"resetp: ipi-style resource resets for plugs",
	"resett: ipi-style resource resets for timeouts",
	"giveup: fall-backs to ipi-style shootdowns",
	"sto: number of source timeouts",
	"bz: number of stay-busy's",
	"throt: number times spun in throttle",
	"swack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE",
	"recv: shootdown messages received",
	"rtime: time spent processing messages",
	"all: shootdown all-tlb messages",
	"one: shootdown one-tlb messages",
	"mult: interrupts that found multiple messages",
	"none: interrupts that found no messages",
	"retry: number of retry messages processed",
	"canc: number messages canceled by retries",
	"nocan: number retries that found nothing to cancel",
	"reset: number of ipi-style reset requests processed",
	"rcan: number messages canceled by reset requests",
	"disable: number times use of the BAU was disabled",
	"enable: number times use of the BAU was re-enabled"
};
static int __init setup_bau(char *arg)

	result = strtobool(arg, &nobau);

	/* we need to flip the logic here, so that bau=y sets nobau to false */

	pr_info("UV BAU Enabled\n");

	pr_info("UV BAU Disabled\n");

early_param("bau", setup_bau);
/* base pnode in this partition */
static int uv_base_pnode __read_mostly;

static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
static DEFINE_PER_CPU(struct bau_control, bau_control);
static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask);
	struct bau_control *bcp;

	pr_info("BAU not initialized; cannot be turned on\n");

	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);

	pr_info("BAU turned on\n");

	struct bau_control *bcp;

	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);

	pr_info("BAU turned off\n");
/*
 * Determine the first node on a uvhub. 'Nodes' are used for kernel
 * memory allocation.
 */
static int __init uvhub_to_first_node(int uvhub)

	for_each_online_node(node) {
		b = uv_node_to_blade_id(node);

/*
 * Determine the apicid of the first cpu on a uvhub.
 */
static int __init uvhub_to_first_apicid(int uvhub)

	for_each_present_cpu(cpu)
		if (uvhub == uv_cpu_to_blade_id(cpu))
			return per_cpu(x86_cpu_to_apicid, cpu);
/*
 * Free a software acknowledge hardware resource by clearing its Pending
 * bit. This will return a reply to the sender.
 * If the message has timed out, a reply has already been sent by the
 * hardware but the resource has not been released. In that case our
 * clear of the Timeout bit (as well) will free the resource. No reply will
 * be sent (the hardware will only do one reply per message).
 */
static void reply_to_message(struct msg_desc *mdp, struct bau_control *bcp,
			     int do_acknowledge)

	struct bau_pq_entry *msg;

	if (!msg->canceled && do_acknowledge) {
		dw = (msg->swack_vec << UV_SW_ACK_NPENDING) | msg->swack_vec;
		ops.write_l_sw_ack(dw);
/*
 * Process the receipt of a RETRY message
 */
static void bau_process_retry_msg(struct msg_desc *mdp,
				  struct bau_control *bcp)

	int cancel_count = 0;
	unsigned long msg_res;
	unsigned long mmr = 0;
	struct bau_pq_entry *msg = mdp->msg;
	struct bau_pq_entry *msg2;
	struct ptc_stats *stat = bcp->statp;

	/*
	 * cancel any message from msg+1 to the retry itself
	 */
	for (msg2 = msg+1, i = 0; i < DEST_Q_SIZE; msg2++, i++) {
		if (msg2 > mdp->queue_last)
			msg2 = mdp->queue_first;

		/* same conditions for cancellation as do_reset */
		if ((msg2->replied_to == 0) && (msg2->canceled == 0) &&
		    (msg2->swack_vec) && ((msg2->swack_vec &
			msg->swack_vec) == 0) &&
		    (msg2->sending_cpu == msg->sending_cpu) &&
		    (msg2->msg_type != MSG_NOOP)) {
			mmr = ops.read_l_sw_ack();
			msg_res = msg2->swack_vec;
			/*
			 * This is a message retry; clear the resources held
			 * by the previous message only if they timed out.
			 * If it has not timed out we have an unexpected
			 * situation to report.
			 */
			if (mmr & (msg_res << UV_SW_ACK_NPENDING)) {
				/*
				 * Is the resource timed out?
				 * Make everyone ignore the cancelled message.
				 */
				mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res;
				ops.write_l_sw_ack(mr);

	stat->d_nocanceled++;
/*
 * Do all the things a cpu should do for a TLB shootdown message.
 * Other cpu's may come here at the same time for this message.
 */
static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp,
				int do_acknowledge)

	short socket_ack_count = 0;
	struct atomic_short *asp;
	struct ptc_stats *stat = bcp->statp;
	struct bau_pq_entry *msg = mdp->msg;
	struct bau_control *smaster = bcp->socket_master;

	/*
	 * This must be a normal message, or retry of a normal message
	 */
	if (msg->address == TLB_FLUSH_ALL) {

		__flush_tlb_one(msg->address);

	/*
	 * One cpu on each uvhub has the additional job on a RETRY
	 * of releasing the resource held by the message that is
	 * being retried. That message is identified by sending
	 * cpu number.
	 */
	if (msg->msg_type == MSG_RETRY && bcp == bcp->uvhub_master)
		bau_process_retry_msg(mdp, bcp);

	/*
	 * This is a swack message, so we have to reply to it.
	 * Count each responding cpu on the socket. This avoids
	 * pinging the count's cache line back and forth between
	 * the sockets.
	 */
	sp = &smaster->socket_acknowledge_count[mdp->msg_slot];
	asp = (struct atomic_short *)sp;
	socket_ack_count = atom_asr(1, asp);
	if (socket_ack_count == bcp->cpus_in_socket) {
		/*
		 * Both sockets dump their completed count total into
		 * the message's count.
		 */
		asp = (struct atomic_short *)&msg->acknowledge_count;
		msg_ack_count = atom_asr(socket_ack_count, asp);

		if (msg_ack_count == bcp->cpus_in_uvhub) {
			/*
			 * All cpus in uvhub saw it; reply
			 * (unless we are in the UV2 workaround)
			 */
			reply_to_message(mdp, bcp, do_acknowledge);
/*
 * Determine the first cpu on a pnode.
 */
static int pnode_to_first_cpu(int pnode, struct bau_control *smaster)

	struct hub_and_pnode *hpp;

	for_each_present_cpu(cpu) {
		hpp = &smaster->thp[cpu];
		if (pnode == hpp->pnode)
/*
 * Last resort when we get a large number of destination timeouts is
 * to clear resources held by a given cpu.
 * Do this with IPI so that all messages in the BAU message queue
 * can be identified by their nonzero swack_vec field.
 *
 * This is entered for a single cpu on the uvhub.
 * The sender wants this uvhub to free a specific message's
 * swack resources.
 */
static void do_reset(void *ptr)

	struct bau_control *bcp = &per_cpu(bau_control, smp_processor_id());
	struct reset_args *rap = (struct reset_args *)ptr;
	struct bau_pq_entry *msg;
	struct ptc_stats *stat = bcp->statp;

	/*
	 * We're looking for the given sender, and
	 * will free its swack resource.
	 * If all cpu's finally responded after the timeout, its
	 * message 'replied_to' was set.
	 */
	for (msg = bcp->queue_first, i = 0; i < DEST_Q_SIZE; msg++, i++) {
		unsigned long msg_res;
		/* do_reset: same conditions for cancellation as
		   bau_process_retry_msg() */
		if ((msg->replied_to == 0) &&
		    (msg->canceled == 0) &&
		    (msg->sending_cpu == rap->sender) &&
		    (msg->msg_type != MSG_NOOP)) {
			/*
			 * make everyone else ignore this message
			 */

			/*
			 * only reset the resource if it is still pending
			 */
			mmr = ops.read_l_sw_ack();
			msg_res = msg->swack_vec;
			mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res;

			ops.write_l_sw_ack(mr);
/*
 * Use IPI to get all target uvhubs to release resources held by
 * a given sending cpu number.
 */
static void reset_with_ipi(struct pnmask *distribution, struct bau_control *bcp)

	int sender = bcp->cpu;
	cpumask_t *mask = bcp->uvhub_master->cpumask;
	struct bau_control *smaster = bcp->socket_master;
	struct reset_args reset_args;

	reset_args.sender = sender;

	/* find a single cpu for each uvhub in this distribution mask */
	maskbits = sizeof(struct pnmask) * BITSPERBYTE;
	/* each bit is a pnode relative to the partition base pnode */
	for (pnode = 0; pnode < maskbits; pnode++) {

		if (!bau_uvhub_isset(pnode, distribution))

		apnode = pnode + bcp->partition_base_pnode;
		cpu = pnode_to_first_cpu(apnode, smaster);
		cpumask_set_cpu(cpu, mask);

	/* IPI all cpus; preemption is already disabled */
	smp_call_function_many(mask, do_reset, (void *)&reset_args, 1);
/*
 * Not to be confused with cycles_2_ns() from tsc.c; this gives a relative
 * number, not an absolute. It converts a duration in cycles to a duration in
 * nanoseconds.
 */
static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
	struct cyc2ns_data *data = cyc2ns_read_begin();
	unsigned long long ns;

	ns = mul_u64_u32_shr(cyc, data->cyc2ns_mul, data->cyc2ns_shift);

	cyc2ns_read_end(data);
	return ns;
}

/*
 * The reverse of the above; converts a duration in ns to a duration in cycles.
 */
static inline unsigned long long ns_2_cycles(unsigned long long ns)
{
	struct cyc2ns_data *data = cyc2ns_read_begin();
	unsigned long long cyc;

	cyc = (ns << data->cyc2ns_shift) / data->cyc2ns_mul;

	cyc2ns_read_end(data);
	return cyc;
}
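
/*
 * Convenience converters built on the two helpers above: cycles to
 * microseconds, seconds to cycles, and microseconds to cycles.
 */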
static inline unsigned long cycles_2_us(unsigned long long cyc)
{
	return cycles_2_ns(cyc) / NSEC_PER_USEC;
}

static inline cycles_t sec_2_cycles(unsigned long sec)
{
	return ns_2_cycles(sec * NSEC_PER_SEC);
}

static inline unsigned long long usec_2_cycles(unsigned long usec)
{
	return ns_2_cycles(usec * NSEC_PER_USEC);
}
/*
 * wait for all cpus on this hub to finish their sends and go quiet
 * leaves uvhub_quiesce set so that no new broadcasts are started by
 * bau_flush_send_and_wait()
 */
static inline void quiesce_local_uvhub(struct bau_control *hmaster)
{
	atom_asr(1, (struct atomic_short *)&hmaster->uvhub_quiesce);
}

/*
 * mark this quiet-requestor as done
 */
static inline void end_uvhub_quiesce(struct bau_control *hmaster)
{
	atom_asr(-1, (struct atomic_short *)&hmaster->uvhub_quiesce);
}
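
/*
 * Read one descriptor's 2-bit status field from the given UV1
 * activation status MMR.
 */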
static unsigned long uv1_read_status(unsigned long mmr_offset, int right_shift)
{
	unsigned long descriptor_status;

	descriptor_status = uv_read_local_mmr(mmr_offset);
	descriptor_status >>= right_shift;
	descriptor_status &= UV_ACT_STATUS_MASK;
	return descriptor_status;
}
/*
 * Wait for completion of a broadcast software ack message
 * return COMPLETE, RETRY(PLUGGED or TIMEOUT) or GIVEUP
 */
static int uv1_wait_completion(struct bau_desc *bau_desc,
				unsigned long mmr_offset, int right_shift,
				struct bau_control *bcp, long try)

	unsigned long descriptor_status;
	struct ptc_stats *stat = bcp->statp;

	descriptor_status = uv1_read_status(mmr_offset, right_shift);
	/* spin on the status MMR, waiting for it to go idle */
	while ((descriptor_status != DS_IDLE)) {
		/*
		 * Our software ack messages may be blocked because
		 * there are no swack resources available. As long
		 * as none of them has timed out hardware will NACK
		 * our message and its state will stay IDLE.
		 */
		if (descriptor_status == DS_SOURCE_TIMEOUT) {

		} else if (descriptor_status == DS_DESTINATION_TIMEOUT) {
			/*
			 * Our retries may be blocked by all destination
			 * swack resources being consumed, and a timeout
			 * pending. In that case hardware returns the
			 * ERROR that looks like a destination timeout.
			 */
			if (cycles_2_us(ttm - bcp->send_message) < timeout_us) {
				bcp->conseccompletes = 0;
				return FLUSH_RETRY_PLUGGED;

			bcp->conseccompletes = 0;
			return FLUSH_RETRY_TIMEOUT;

		/*
		 * descriptor_status is still BUSY
		 */
		descriptor_status = uv1_read_status(mmr_offset, right_shift);

	bcp->conseccompletes++;
	return FLUSH_COMPLETE;
/*
 * UV2 could have an extra bit of status in the ACTIVATION_STATUS_2 register.
 * But not currently used.
 */
static unsigned long uv2_3_read_status(unsigned long offset, int rshft, int desc)
{
	return ((read_lmmr(offset) >> rshft) & UV_ACT_STATUS_MASK) << 1;
}

/*
 * Return whether the status of the descriptor that is normally used for this
 * cpu (the one indexed by its hub-relative cpu number) is busy.
 * The status of the original 32 descriptors is always reflected in the 64
 * bits of UVH_LB_BAU_SB_ACTIVATION_STATUS_0.
 * The bit provided by the activation_status_2 register is irrelevant to
 * the status if it is only being tested for busy or not busy.
 */
int normal_busy(struct bau_control *bcp)

	int cpu = bcp->uvhub_cpu;

	mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
	right_shift = cpu * UV_ACT_STATUS_SIZE;
	return (((((read_lmmr(mmr_offset) >> right_shift) &
				UV_ACT_STATUS_MASK)) << 1) == UV2H_DESC_BUSY);

/*
 * Entered when a bau descriptor has gone into a permanent busy wait because
 * of a hardware bug.
 * Workaround the bug.
 */
int handle_uv2_busy(struct bau_control *bcp)

	struct ptc_stats *stat = bcp->statp;
static int uv2_3_wait_completion(struct bau_desc *bau_desc,
				unsigned long mmr_offset, int right_shift,
				struct bau_control *bcp, long try)

	unsigned long descriptor_stat;
	int desc = bcp->uvhub_cpu;
	struct ptc_stats *stat = bcp->statp;

	descriptor_stat = uv2_3_read_status(mmr_offset, right_shift, desc);

	/* spin on the status MMR, waiting for it to go idle */
	while (descriptor_stat != UV2H_DESC_IDLE) {
		if ((descriptor_stat == UV2H_DESC_SOURCE_TIMEOUT)) {
			/*
			 * A h/w bug on the destination side may
			 * have prevented the message being marked
			 * pending, thus it doesn't get replied to
			 * and gets continually nacked until it times
			 * out with a SOURCE_TIMEOUT.
			 */
		} else if (descriptor_stat == UV2H_DESC_DEST_TIMEOUT) {
			/*
			 * Our retries may be blocked by all destination
			 * swack resources being consumed, and a timeout
			 * pending. In that case hardware returns the
			 * ERROR that looks like a destination timeout.
			 * Without using the extended status we have to
			 * deduce from the short time that this was a
			 * strong nack.
			 */
			if (cycles_2_us(ttm - bcp->send_message) < timeout_us) {
				bcp->conseccompletes = 0;
				/* FLUSH_RETRY_PLUGGED causes hang on boot */

			bcp->conseccompletes = 0;
			/* FLUSH_RETRY_TIMEOUT causes hang on boot */

		if (busy_reps > 1000000) {
			/* not to hammer on the clock */

			if ((ttm - bcp->send_message) > bcp->timeout_interval)
				return handle_uv2_busy(bcp);

		/*
		 * descriptor_stat is still BUSY
		 */
		descriptor_stat = uv2_3_read_status(mmr_offset, right_shift, desc);

	bcp->conseccompletes++;
	return FLUSH_COMPLETE;
/*
 * There are 2 status registers; each an array[32] of 2 bits. Set up for
 * which register to read and position in that register based on cpu in
 * current hub.
 */
static int wait_completion(struct bau_desc *bau_desc, struct bau_control *bcp, long try)

	unsigned long mmr_offset;
	int desc = bcp->uvhub_cpu;

	if (desc < UV_CPUS_PER_AS) {
		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
		right_shift = desc * UV_ACT_STATUS_SIZE;
	} else {
		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
		right_shift = ((desc - UV_CPUS_PER_AS) * UV_ACT_STATUS_SIZE);
	}

	if (bcp->uvhub_version == 1)
		return uv1_wait_completion(bau_desc, mmr_offset, right_shift, bcp, try);

	return uv2_3_wait_completion(bau_desc, mmr_offset, right_shift, bcp, try);
/*
 * Our retries are blocked by all destination sw ack resources being
 * in use, and a timeout is pending. In that case hardware immediately
 * returns the ERROR that looks like a destination timeout.
 */
static void destination_plugged(struct bau_desc *bau_desc,
			struct bau_control *bcp,
			struct bau_control *hmaster, struct ptc_stats *stat)

	udelay(bcp->plugged_delay);
	bcp->plugged_tries++;

	if (bcp->plugged_tries >= bcp->plugsb4reset) {
		bcp->plugged_tries = 0;

		quiesce_local_uvhub(hmaster);

		spin_lock(&hmaster->queue_lock);
		reset_with_ipi(&bau_desc->distribution, bcp);
		spin_unlock(&hmaster->queue_lock);

		end_uvhub_quiesce(hmaster);

	stat->s_resets_plug++;
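
/*
 * Too many destination timeouts: throttle this hub to one concurrent
 * broadcast and, past the threshold, reset destination resources with IPIs.
 */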
static void destination_timeout(struct bau_desc *bau_desc,
			struct bau_control *bcp, struct bau_control *hmaster,
			struct ptc_stats *stat)

	hmaster->max_concurr = 1;
	bcp->timeout_tries++;
	if (bcp->timeout_tries >= bcp->timeoutsb4reset) {
		bcp->timeout_tries = 0;

		quiesce_local_uvhub(hmaster);

		spin_lock(&hmaster->queue_lock);
		reset_with_ipi(&bau_desc->distribution, bcp);
		spin_unlock(&hmaster->queue_lock);

		end_uvhub_quiesce(hmaster);

	stat->s_resets_timeout++;
/*
 * Stop all cpus on a uvhub from using the BAU for a period of time.
 * This is reversed by check_enable.
 */
static void disable_for_period(struct bau_control *bcp, struct ptc_stats *stat)

	struct bau_control *tbcp;
	struct bau_control *hmaster;

	hmaster = bcp->uvhub_master;
	spin_lock(&hmaster->disable_lock);
	if (!bcp->baudisabled) {
		stat->s_bau_disabled++;

		for_each_present_cpu(tcpu) {
			tbcp = &per_cpu(bau_control, tcpu);
			if (tbcp->uvhub_master == hmaster) {
				tbcp->baudisabled = 1;
				tbcp->set_bau_on_time =
					tm1 + bcp->disabled_period;

	spin_unlock(&hmaster->disable_lock);
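
/*
 * After a run of clean completions, let the hub's allowed concurrency
 * creep back up toward its configured maximum.
 */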
static void count_max_concurr(int stat, struct bau_control *bcp,
			struct bau_control *hmaster)

	bcp->plugged_tries = 0;
	bcp->timeout_tries = 0;
	if (stat != FLUSH_COMPLETE)
		return;
	if (bcp->conseccompletes <= bcp->complete_threshold)
		return;
	if (hmaster->max_concurr >= hmaster->max_concurr_const)
		return;
	hmaster->max_concurr++;
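
/*
 * Account the elapsed time of this send and decide from recent history
 * whether the BAU should be disabled for a period.
 */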
static void record_send_stats(cycles_t time1, cycles_t time2,
		struct bau_control *bcp, struct ptc_stats *stat,
		int completion_status, int try)

	elapsed = time2 - time1;
	stat->s_time += elapsed;

	if ((completion_status == FLUSH_COMPLETE) && (try == 1)) {
		bcp->period_requests++;
		bcp->period_time += elapsed;
		if ((elapsed > congested_cycles) &&
		    (bcp->period_requests > bcp->cong_reps) &&
		    ((bcp->period_time / bcp->period_requests) >
						congested_cycles)) {
			disable_for_period(bcp, stat);

	if (completion_status == FLUSH_COMPLETE && try > 1)

	else if (completion_status == FLUSH_GIVEUP) {

		if (get_cycles() > bcp->period_end)
			bcp->period_giveups = 0;
		bcp->period_giveups++;
		if (bcp->period_giveups == 1)
			bcp->period_end = get_cycles() + bcp->disabled_period;
		if (bcp->period_giveups > bcp->giveup_limit) {
			disable_for_period(bcp, stat);
			stat->s_giveuplimit++;
/*
 * Because of a uv1 hardware bug only a limited number of concurrent
 * requests can be made.
 */
static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat)

	spinlock_t *lock = &hmaster->uvhub_lock;

	v = &hmaster->active_descriptor_count;
	if (!atomic_inc_unless_ge(lock, v, hmaster->max_concurr)) {

	} while (!atomic_inc_unless_ge(lock, v, hmaster->max_concurr));
/*
 * Handle the completion status of a message send.
 */
static void handle_cmplt(int completion_status, struct bau_desc *bau_desc,
			struct bau_control *bcp, struct bau_control *hmaster,
			struct ptc_stats *stat)

	if (completion_status == FLUSH_RETRY_PLUGGED)
		destination_plugged(bau_desc, bcp, hmaster, stat);
	else if (completion_status == FLUSH_RETRY_TIMEOUT)
		destination_timeout(bau_desc, bcp, hmaster, stat);
/*
 * Send a broadcast and wait for it to complete.
 *
 * The flush_mask contains the cpus the broadcast is to be sent to including
 * cpus that are on the local uvhub.
 *
 * Returns 0 if all flushing represented in the mask was done.
 * Returns 1 if it gives up entirely and the original cpu mask is to be
 * returned to the kernel.
 */
int uv_flush_send_and_wait(struct cpumask *flush_mask, struct bau_control *bcp,
			   struct bau_desc *bau_desc)

	int completion_stat = 0;
	struct ptc_stats *stat = bcp->statp;
	struct bau_control *hmaster = bcp->uvhub_master;
	struct uv1_bau_msg_header *uv1_hdr = NULL;
	struct uv2_3_bau_msg_header *uv2_3_hdr = NULL;

	if (bcp->uvhub_version == 1) {

		uv1_throttle(hmaster, stat);

	while (hmaster->uvhub_quiesce)

	time1 = get_cycles();

	uv1_hdr = &bau_desc->header.uv1_hdr;

	uv2_3_hdr = &bau_desc->header.uv2_3_hdr;

	uv1_hdr->msg_type = MSG_REGULAR;

	uv2_3_hdr->msg_type = MSG_REGULAR;
	seq_number = bcp->message_number++;

	uv1_hdr->msg_type = MSG_RETRY;

	uv2_3_hdr->msg_type = MSG_RETRY;
	stat->s_retry_messages++;

	uv1_hdr->sequence = seq_number;

	uv2_3_hdr->sequence = seq_number;
	index = (1UL << AS_PUSH_SHIFT) | bcp->uvhub_cpu;
	bcp->send_message = get_cycles();

	write_mmr_activation(index);

	completion_stat = wait_completion(bau_desc, bcp, try);

	handle_cmplt(completion_stat, bau_desc, bcp, hmaster, stat);

	if (bcp->ipi_attempts >= bcp->ipi_reset_limit) {
		bcp->ipi_attempts = 0;
		stat->s_overipilimit++;
		completion_stat = FLUSH_GIVEUP;

	} while ((completion_stat == FLUSH_RETRY_PLUGGED) ||
		 (completion_stat == FLUSH_RETRY_TIMEOUT));

	time2 = get_cycles();

	count_max_concurr(completion_stat, bcp, hmaster);

	while (hmaster->uvhub_quiesce)

	atomic_dec(&hmaster->active_descriptor_count);

	record_send_stats(time1, time2, bcp, stat, completion_stat, try);

	if (completion_stat == FLUSH_GIVEUP)
		/* FLUSH_GIVEUP will fall back to using IPI's for tlb flush */
/*
 * The BAU is disabled for this uvhub. When the disabled time period has
 * expired re-enable it.
 * Return 0 if it is re-enabled for all cpus on this uvhub.
 */
static int check_enable(struct bau_control *bcp, struct ptc_stats *stat)

	struct bau_control *tbcp;
	struct bau_control *hmaster;

	hmaster = bcp->uvhub_master;
	spin_lock(&hmaster->disable_lock);
	if (bcp->baudisabled && (get_cycles() >= bcp->set_bau_on_time)) {
		stat->s_bau_reenabled++;
		for_each_present_cpu(tcpu) {
			tbcp = &per_cpu(bau_control, tcpu);
			if (tbcp->uvhub_master == hmaster) {
				tbcp->baudisabled = 0;
				tbcp->period_requests = 0;
				tbcp->period_time = 0;
				tbcp->period_giveups = 0;

		spin_unlock(&hmaster->disable_lock);

	spin_unlock(&hmaster->disable_lock);
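
/* Update the sender-side counters of cpus and uvhubs targeted by this shootdown. */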
static void record_send_statistics(struct ptc_stats *stat, int locals, int hubs,
				int remotes, struct bau_desc *bau_desc)

	stat->s_requestor++;
	stat->s_ntargcpu += remotes + locals;
	stat->s_ntargremotes += remotes;
	stat->s_ntarglocals += locals;

	/* uvhub statistics */
	hubs = bau_uvhub_weight(&bau_desc->distribution);

		stat->s_ntarglocaluvhub++;
		stat->s_ntargremoteuvhub += (hubs - 1);

		stat->s_ntargremoteuvhub += hubs;

	stat->s_ntarguvhub += hubs;

		stat->s_ntarguvhub16++;

		stat->s_ntarguvhub8++;

		stat->s_ntarguvhub4++;

		stat->s_ntarguvhub2++;

		stat->s_ntarguvhub1++;
/*
 * Translate a cpu mask to the uvhub distribution mask in the BAU
 * activation descriptor.
 */
static int set_distrib_bits(struct cpumask *flush_mask, struct bau_control *bcp,
			struct bau_desc *bau_desc, int *localsp, int *remotesp)

	struct hub_and_pnode *hpp;

	for_each_cpu(cpu, flush_mask) {
		/*
		 * The distribution vector is a bit map of pnodes, relative
		 * to the partition base pnode (and the partition base nasid
		 * in the header).
		 * Translate cpu to pnode and hub using a local memory array.
		 */
		hpp = &bcp->socket_master->thp[cpu];
		pnode = hpp->pnode - bcp->partition_base_pnode;
		bau_uvhub_set(pnode, &bau_desc->distribution);

		if (hpp->uvhub == bcp->uvhub)
/*
 * globally purge translation cache of a virtual address or all TLB's
 * @cpumask: mask of all cpu's in which the address is to be removed
 * @mm: mm_struct containing virtual address range
 * @start: start virtual address to be removed from TLB
 * @end: end virtual address to be removed from TLB
 * @cpu: the current cpu
 *
 * This is the entry point for initiating any UV global TLB shootdown.
 *
 * Purges the translation caches of all specified processors of the given
 * virtual address, or purges all TLB's on specified processors.
 *
 * The caller has derived the cpumask from the mm_struct. This function
 * is called only if there are bits set in the mask. (e.g. flush_tlb_page())
 *
 * The cpumask is converted into a uvhubmask of the uvhubs containing
 * those cpus.
 *
 * Note that this function should be called with preemption disabled.
 *
 * Returns NULL if all remote flushing was done.
 * Returns pointer to cpumask if some remote flushing remains to be
 * done. The returned pointer is valid till preemption is re-enabled.
 */
const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
					  struct mm_struct *mm,
					  unsigned long start,
					  unsigned long end,
					  unsigned int cpu)

	struct bau_desc *bau_desc;
	struct cpumask *flush_mask;
	struct ptc_stats *stat;
	struct bau_control *bcp;
	unsigned long descriptor_status;
	unsigned long status;

	bcp = &per_cpu(bau_control, cpu);

	descriptor_status =
		read_lmmr(UVH_LB_BAU_SB_ACTIVATION_STATUS_0);
	status = ((descriptor_status >> (bcp->uvhub_cpu *
		UV_ACT_STATUS_SIZE)) & UV_ACT_STATUS_MASK) << 1;
	if (status == UV2H_DESC_BUSY)

	/* bau was disabled due to slow response */
	if (bcp->baudisabled) {
		if (check_enable(bcp, stat)) {
			stat->s_ipifordisabled++;

	/*
	 * Each sending cpu has a per-cpu mask which it fills from the caller's
	 * cpu mask. All cpus are converted to uvhubs and copied to the
	 * activation descriptor.
	 */
	flush_mask = (struct cpumask *)per_cpu(uv_flush_tlb_mask, cpu);
	/* don't actually do a shootdown of the local cpu */
	cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));

	if (cpumask_test_cpu(cpu, cpumask))
		stat->s_ntargself++;

	bau_desc = bcp->descriptor_base;
	bau_desc += (ITEMS_PER_DESC * bcp->uvhub_cpu);
	bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
	if (set_distrib_bits(flush_mask, bcp, bau_desc, &locals, &remotes))

	record_send_statistics(stat, locals, hubs, remotes, bau_desc);

	if (!end || (end - start) <= PAGE_SIZE)
		bau_desc->payload.address = start;
	else
		bau_desc->payload.address = TLB_FLUSH_ALL;
	bau_desc->payload.sending_cpu = cpu;

	/*
	 * uv_flush_send_and_wait returns 0 if all cpu's were messaged,
	 * or 1 if it gave up and the original cpumask should be returned.
	 */
	if (!uv_flush_send_and_wait(flush_mask, bcp, bau_desc))
/*
 * Search the message queue for any 'other' unprocessed message with the
 * same software acknowledge resource bit vector as the 'msg' message.
 */
struct bau_pq_entry *find_another_by_swack(struct bau_pq_entry *msg,
					   struct bau_control *bcp)

	struct bau_pq_entry *msg_next = msg + 1;
	unsigned char swack_vec = msg->swack_vec;

	if (msg_next > bcp->queue_last)
		msg_next = bcp->queue_first;
	while (msg_next != msg) {
		if ((msg_next->canceled == 0) && (msg_next->replied_to == 0) &&
		    (msg_next->swack_vec == swack_vec))

		if (msg_next > bcp->queue_last)
			msg_next = bcp->queue_first;
/*
 * UV2 needs to work around a bug in which an arriving message has not
 * set a bit in the UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE register.
 * Such a message must be ignored.
 */
void process_uv2_message(struct msg_desc *mdp, struct bau_control *bcp)

	unsigned long mmr_image;
	unsigned char swack_vec;
	struct bau_pq_entry *msg = mdp->msg;
	struct bau_pq_entry *other_msg;

	mmr_image = ops.read_l_sw_ack();
	swack_vec = msg->swack_vec;

	if ((swack_vec & mmr_image) == 0) {
		/*
		 * This message was assigned a swack resource, but no
		 * reserved acknowledgment is pending.
		 * The bug has prevented this message from setting the MMR.
		 *
		 * Some message has set the MMR 'pending' bit; it might have
		 * been another message. Look for that message.
		 */
		other_msg = find_another_by_swack(msg, bcp);
			/*
			 * There is another. Process this one but do not
			 * reply.
			 */
			bau_process_message(mdp, bcp, 0);
			/*
			 * Let the natural processing of that other message
			 * acknowledge it. Don't get the processing of sw_ack's
			 * out of order.
			 */

	/*
	 * Either the MMR shows this one pending a reply or there is no
	 * other message using this sw_ack, so it is safe to acknowledge it.
	 */
	bau_process_message(mdp, bcp, 1);
/*
 * The BAU message interrupt comes here. (registered by set_intr_gate)
 *
 * We received a broadcast assist message.
 *
 * Interrupts are disabled; this interrupt could represent
 * the receipt of several messages.
 *
 * All cores/threads on this hub get this interrupt.
 * The last one to see it does the software ack.
 * (the resource will not be freed until noninterruptable cpus see this
 *  interrupt; hardware may timeout the s/w ack and reply ERROR)
 */
void uv_bau_message_interrupt(struct pt_regs *regs)

	cycles_t time_start;
	struct bau_pq_entry *msg;
	struct bau_control *bcp;
	struct ptc_stats *stat;
	struct msg_desc msgdesc;

	time_start = get_cycles();

	bcp = &per_cpu(bau_control, smp_processor_id());

	msgdesc.queue_first = bcp->queue_first;
	msgdesc.queue_last = bcp->queue_last;

	msg = bcp->bau_msg_head;
	while (msg->swack_vec) {

		msgdesc.msg_slot = msg - msgdesc.queue_first;
		if (bcp->uvhub_version == 2)
			process_uv2_message(&msgdesc, bcp);
		else
			/* no error workaround for uv1 or uv3 */
			bau_process_message(&msgdesc, bcp, 1);

		if (msg > msgdesc.queue_last)
			msg = msgdesc.queue_first;
		bcp->bau_msg_head = msg;

	stat->d_time += (get_cycles() - time_start);
/*
 * Each target uvhub (i.e. a uvhub that has cpu's) needs to have
 * shootdown message timeouts enabled. The timeout does not cause
 * an interrupt, but causes an error message to be returned to
 * the sender.
 */
static void __init enable_timeouts(void)

	unsigned long mmr_image;

	nuvhubs = uv_num_possible_blades();

	for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
		if (!uv_blade_nr_possible_cpus(uvhub))

		pnode = uv_blade_to_pnode(uvhub);
		mmr_image = read_mmr_misc_control(pnode);
		/*
		 * Set the timeout period and then lock it in, in three
		 * steps; captures and locks in the period.
		 *
		 * To program the period, the SOFT_ACK_MODE must be off.
		 */
		mmr_image &= ~(1L << SOFTACK_MSHIFT);
		write_mmr_misc_control(pnode, mmr_image);
		/*
		 * Set the 4-bit period.
		 */
		mmr_image &= ~((unsigned long)0xf << SOFTACK_PSHIFT);
		mmr_image |= (SOFTACK_TIMEOUT_PERIOD << SOFTACK_PSHIFT);
		write_mmr_misc_control(pnode, mmr_image);
		/*
		 * Subsequent reversals of the timebase bit (3) cause an
		 * immediate timeout of one or all INTD resources as
		 * indicated in bits 2:0 (7 causes all of them to timeout).
		 */
		mmr_image |= (1L << SOFTACK_MSHIFT);
		if (is_uv2_hub()) {
			/* do not touch the legacy mode bit */
			/* hw bug workaround; do not use extended status */
			mmr_image &= ~(1L << UV2_EXT_SHFT);
		} else if (is_uv3_hub()) {
			mmr_image &= ~(1L << PREFETCH_HINT_SHFT);
			mmr_image |= (1L << SB_STATUS_SHFT);

		write_mmr_misc_control(pnode, mmr_image);
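
/* seq_file iterators for /proc/sgi_uv/ptc_statistics; one record per possible cpu */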
static void *ptc_seq_start(struct seq_file *file, loff_t *offset)

	if (*offset < num_possible_cpus())

static void *ptc_seq_next(struct seq_file *file, void *data, loff_t *offset)

	if (*offset < num_possible_cpus())

static void ptc_seq_stop(struct seq_file *file, void *data)
/*
 * Display the statistics thru /proc/sgi_uv/ptc_statistics
 * 'data' points to the cpu number
 * Note: see the descriptions in stat_description[].
 */
static int ptc_seq_show(struct seq_file *file, void *data)

	struct ptc_stats *stat;
	struct bau_control *bcp;

	cpu = *(loff_t *)data;

		seq_puts(file,
			 "# cpu bauoff sent stime self locals remotes ncpus localhub ");
		seq_puts(file, "remotehub numuvhubs numuvhubs16 numuvhubs8 ");
		seq_puts(file,
			 "numuvhubs4 numuvhubs2 numuvhubs1 dto snacks retries ");
		seq_puts(file,
			 "rok resetp resett giveup sto bz throt disable ");
		seq_puts(file,
			 "enable wars warshw warwaits enters ipidis plugged ");
		seq_puts(file,
			 "ipiover glim cong swack recv rtime all one mult ");
		seq_puts(file, "none retry canc nocan reset rcan\n");

	if (cpu < num_possible_cpus() && cpu_online(cpu)) {
		bcp = &per_cpu(bau_control, cpu);

			seq_printf(file, "cpu %d bau disabled\n", cpu);

		/* source side statistics */
		seq_printf(file,
			"cpu %d %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
			cpu, bcp->nobau, stat->s_requestor,
			cycles_2_us(stat->s_time),
			stat->s_ntargself, stat->s_ntarglocals,
			stat->s_ntargremotes, stat->s_ntargcpu,
			stat->s_ntarglocaluvhub, stat->s_ntargremoteuvhub,
			stat->s_ntarguvhub, stat->s_ntarguvhub16);
		seq_printf(file, "%ld %ld %ld %ld %ld %ld ",
			stat->s_ntarguvhub8, stat->s_ntarguvhub4,
			stat->s_ntarguvhub2, stat->s_ntarguvhub1,
			stat->s_dtimeout, stat->s_strongnacks);
		seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld ",
			stat->s_retry_messages, stat->s_retriesok,
			stat->s_resets_plug, stat->s_resets_timeout,
			stat->s_giveup, stat->s_stimeout,
			stat->s_busy, stat->s_throttles);
		seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
			stat->s_bau_disabled, stat->s_bau_reenabled,
			stat->s_uv2_wars, stat->s_uv2_wars_hw,
			stat->s_uv2_war_waits, stat->s_enters,
			stat->s_ipifordisabled, stat->s_plugged,
			stat->s_overipilimit, stat->s_giveuplimit,

		/* destination side statistics */
		seq_printf(file,
			"%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n",
			ops.read_g_sw_ack(uv_cpu_to_pnode(cpu)),
			stat->d_requestee, cycles_2_us(stat->d_time),
			stat->d_alltlb, stat->d_onetlb, stat->d_multmsg,
			stat->d_nomsg, stat->d_retries, stat->d_canceled,
			stat->d_nocanceled, stat->d_resets,
/*
 * Display the tunables thru debugfs
 */
static ssize_t tunables_read(struct file *file, char __user *userbuf,
				size_t count, loff_t *ppos)

	buf = kasprintf(GFP_KERNEL, "%s %s %s\n%d %d %d %d %d %d %d %d %d %d\n",
		"max_concur plugged_delay plugsb4reset timeoutsb4reset",
		"ipi_reset_limit complete_threshold congested_response_us",
		"congested_reps disabled_period giveup_limit",
		max_concurr, plugged_delay, plugsb4reset,
		timeoutsb4reset, ipi_reset_limit, complete_threshold,
		congested_respns_us, congested_reps, disabled_period,

	ret = simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf));
/*
 * handle a write to /proc/sgi_uv/ptc_statistics
 * -1: reset the statistics
 *  0: display meaning of the statistics
 */
static ssize_t ptc_proc_write(struct file *file, const char __user *user,
				size_t count, loff_t *data)

	struct ptc_stats *stat;

	if (count == 0 || count > sizeof(optstr))

	if (copy_from_user(optstr, user, count))

	optstr[count - 1] = '\0';

	if (!strcmp(optstr, "on")) {

	} else if (!strcmp(optstr, "off")) {

	if (kstrtol(optstr, 10, &input_arg) < 0) {
		pr_debug("%s is invalid\n", optstr);

	if (input_arg == 0) {
		elements = ARRAY_SIZE(stat_description);
		pr_debug("# cpu: cpu number\n");
		pr_debug("Sender statistics:\n");
		for (i = 0; i < elements; i++)
			pr_debug("%s\n", stat_description[i]);
	} else if (input_arg == -1) {
		for_each_present_cpu(cpu) {
			stat = &per_cpu(ptcstats, cpu);
			memset(stat, 0, sizeof(struct ptc_stats));
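
/* minimal decimal string-to-int converter used by the tunables parser */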
static int local_atoi(const char *name)

			val = 10*val+(*name-'0');
/*
 * Parse the values written to /sys/kernel/debug/sgi_uv/bau_tunables.
 * Zero values reset them to defaults.
 */
static int parse_tunables_write(struct bau_control *bcp, char *instr,

	int e = ARRAY_SIZE(tunables);

	p = instr + strspn(instr, WHITESPACE);

	for (; *p; p = q + strspn(q, WHITESPACE)) {
		q = p + strcspn(p, WHITESPACE);

		pr_info("bau tunable error: should be %d values\n", e);

	p = instr + strspn(instr, WHITESPACE);

	for (cnt = 0; *p; p = q + strspn(q, WHITESPACE), cnt++) {
		q = p + strcspn(p, WHITESPACE);
		val = local_atoi(p);

			max_concurr = MAX_BAU_CONCURRENT;
			max_concurr_const = MAX_BAU_CONCURRENT;

			if (val < 1 || val > bcp->cpus_in_uvhub) {
				"Error: BAU max concurrent %d is invalid\n",

			max_concurr_const = val;

			*tunables[cnt].tunp = tunables[cnt].deflt;

			*tunables[cnt].tunp = val;
/*
 * Handle a write to debugfs. (/sys/kernel/debug/sgi_uv/bau_tunables)
 */
static ssize_t tunables_write(struct file *file, const char __user *user,
				size_t count, loff_t *data)

	struct bau_control *bcp;

	if (count == 0 || count > sizeof(instr)-1)

	if (copy_from_user(instr, user, count))

	instr[count] = '\0';

	bcp = &per_cpu(bau_control, cpu);
	ret = parse_tunables_write(bcp, instr, count);

	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);
		bcp->max_concurr	= max_concurr;
		bcp->max_concurr_const	= max_concurr;
		bcp->plugged_delay	= plugged_delay;
		bcp->plugsb4reset	= plugsb4reset;
		bcp->timeoutsb4reset	= timeoutsb4reset;
		bcp->ipi_reset_limit	= ipi_reset_limit;
		bcp->complete_threshold	= complete_threshold;
		bcp->cong_response_us	= congested_respns_us;
		bcp->cong_reps		= congested_reps;
		bcp->disabled_period	= sec_2_cycles(disabled_period);
		bcp->giveup_limit	= giveup_limit;
static const struct seq_operations uv_ptc_seq_ops = {
	.start	= ptc_seq_start,
	.next	= ptc_seq_next,
	.stop	= ptc_seq_stop,
	.show	= ptc_seq_show
};

static int ptc_proc_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &uv_ptc_seq_ops);
}

static int tunables_open(struct inode *inode, struct file *file)

static const struct file_operations proc_uv_ptc_operations = {
	.open		= ptc_proc_open,
	.write		= ptc_proc_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static const struct file_operations tunables_fops = {
	.open		= tunables_open,
	.read		= tunables_read,
	.write		= tunables_write,
	.llseek		= default_llseek,
};
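
/* Create the /proc statistics entry and the debugfs tunables directory and file. */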
static int __init uv_ptc_init(void)

	struct proc_dir_entry *proc_uv_ptc;

	if (!is_uv_system())

	proc_uv_ptc = proc_create(UV_PTC_BASENAME, 0444, NULL,
				  &proc_uv_ptc_operations);

		pr_err("unable to create %s proc entry\n",

	tunables_dir = debugfs_create_dir(UV_BAU_TUNABLES_DIR, NULL);
	if (!tunables_dir) {
		pr_err("unable to create debugfs directory %s\n",
			UV_BAU_TUNABLES_DIR);

	tunables_file = debugfs_create_file(UV_BAU_TUNABLES_FILE, 0600,
					tunables_dir, NULL, &tunables_fops);
	if (!tunables_file) {
		pr_err("unable to create debugfs file %s\n",
			UV_BAU_TUNABLES_FILE);
/*
 * Initialize the sending side's sending buffers.
 */
static void activation_descriptor_init(int node, int pnode, int base_pnode)

	struct bau_desc *bau_desc;
	struct bau_desc *bd2;
	struct uv1_bau_msg_header *uv1_hdr;
	struct uv2_3_bau_msg_header *uv2_3_hdr;
	struct bau_control *bcp;

	/*
	 * each bau_desc is 64 bytes; there are 8 (ITEMS_PER_DESC)
	 * per cpu; and one per cpu on the uvhub (ADP_SZ)
	 */
	dsize = sizeof(struct bau_desc) * ADP_SZ * ITEMS_PER_DESC;
	bau_desc = kmalloc_node(dsize, GFP_KERNEL, node);

	gpa = uv_gpa(bau_desc);
	n = uv_gpa_to_gnode(gpa);
	m = ops.bau_gpa_to_offset(gpa);

	/* the 14-bit pnode */
	write_mmr_descriptor_base(pnode, (n << UV_DESC_PSHIFT | m));
	/*
	 * Initializing all 8 (ITEMS_PER_DESC) descriptors for each
	 * cpu even though we only use the first one; one descriptor can
	 * describe a broadcast to 256 uv hubs.
	 */
	for (i = 0, bd2 = bau_desc; i < (ADP_SZ * ITEMS_PER_DESC); i++, bd2++) {
		memset(bd2, 0, sizeof(struct bau_desc));

			uv1_hdr = &bd2->header.uv1_hdr;
			uv1_hdr->swack_flag = 1;
			/*
			 * The base_dest_nasid set in the message header
			 * is the nasid of the first uvhub in the partition.
			 * The bit map will indicate destination pnode numbers
			 * relative to that base. They may not be consecutive
			 * if nasid striding is being used.
			 */
			uv1_hdr->base_dest_nasid =
						UV_PNODE_TO_NASID(base_pnode);
			uv1_hdr->dest_subnodeid = UV_LB_SUBNODEID;
			uv1_hdr->command = UV_NET_ENDPOINT_INTD;
			uv1_hdr->int_both = 1;
			/*
			 * all others need to be set to zero:
			 * fairness chaining multilevel count replied_to
			 */

			/*
			 * BIOS uses legacy mode, but uv2 and uv3 hardware always
			 * uses native mode for selective broadcasts.
			 */
			uv2_3_hdr = &bd2->header.uv2_3_hdr;
			uv2_3_hdr->swack_flag = 1;
			uv2_3_hdr->base_dest_nasid =
						UV_PNODE_TO_NASID(base_pnode);
			uv2_3_hdr->dest_subnodeid = UV_LB_SUBNODEID;
			uv2_3_hdr->command = UV_NET_ENDPOINT_INTD;

	for_each_present_cpu(cpu) {
		if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu)))

		bcp = &per_cpu(bau_control, cpu);
		bcp->descriptor_base = bau_desc;
/*
 * initialize the destination side's receiving buffers
 * entered for each uvhub in the partition
 * - node is first node (kernel memory notion) on the uvhub
 * - pnode is the uvhub's physical identifier
 */
static void pq_init(int node, int pnode)

	unsigned long gnode, first, last, tail;
	struct bau_pq_entry *pqp;
	struct bau_control *bcp;

	plsize = (DEST_Q_SIZE + 1) * sizeof(struct bau_pq_entry);
	vp = kmalloc_node(plsize, GFP_KERNEL, node);
	pqp = (struct bau_pq_entry *)vp;

	cp = (char *)pqp + 31;
	pqp = (struct bau_pq_entry *)(((unsigned long)cp >> 5) << 5);

	for_each_present_cpu(cpu) {
		if (pnode != uv_cpu_to_pnode(cpu))

		/* for every cpu on this pnode: */
		bcp = &per_cpu(bau_control, cpu);
		bcp->queue_first = pqp;
		bcp->bau_msg_head = pqp;
		bcp->queue_last = pqp + (DEST_Q_SIZE - 1);

	first = ops.bau_gpa_to_offset(uv_gpa(pqp));
	last = ops.bau_gpa_to_offset(uv_gpa(pqp + (DEST_Q_SIZE - 1)));

	/*
	 * Pre UV4, the gnode is required to locate the payload queue
	 * and the payload queue tail must be maintained by the kernel.
	 */
	bcp = &per_cpu(bau_control, smp_processor_id());
	if (bcp->uvhub_version <= 3) {

		gnode = uv_gpa_to_gnode(uv_gpa(pqp));
		first = (gnode << UV_PAYLOADQ_GNODE_SHIFT) | tail;
		write_mmr_payload_tail(pnode, tail);

	ops.write_payload_first(pnode, first);
	ops.write_payload_last(pnode, last);
	ops.write_g_sw_ack(pnode, 0xffffUL);

	/* in effect, all msg_type's are set to MSG_NOOP */
	memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE);
/*
 * Initialization of each UV hub's structures
 */
static void __init init_uvhub(int uvhub, int vector, int base_pnode)

	unsigned long apicid;

	node = uvhub_to_first_node(uvhub);
	pnode = uv_blade_to_pnode(uvhub);

	activation_descriptor_init(node, pnode, base_pnode);

	pq_init(node, pnode);
	/*
	 * The below initialization can't be in firmware because the
	 * messaging IRQ will be determined by the OS.
	 */
	apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits;
	write_mmr_data_config(pnode, ((apicid << 32) | vector));
/*
 * We will set BAU_MISC_CONTROL with a timeout period.
 * But the BIOS has set UVH_AGING_PRESCALE_SEL and UVH_TRANSACTION_TIMEOUT.
 * So the destination timeout period has to be calculated from them.
 */
static int calculate_destination_timeout(void)

	unsigned long mmr_image;
	unsigned long ts_ns;

	mult1 = SOFTACK_TIMEOUT_PERIOD & BAU_MISC_CONTROL_MULT_MASK;
	mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
	index = (mmr_image >> BAU_URGENCY_7_SHIFT) & BAU_URGENCY_7_MASK;
	mmr_image = uv_read_local_mmr(UVH_TRANSACTION_TIMEOUT);
	mult2 = (mmr_image >> BAU_TRANS_SHIFT) & BAU_TRANS_MASK;
	ts_ns = timeout_base_ns[index];
	ts_ns *= (mult1 * mult2);

	/* same destination timeout for uv2 and uv3 */
	/* 4 bits  0/1 for 10/80us base, 3 bits of multiplier */
	mmr_image = uv_read_local_mmr(UVH_LB_BAU_MISC_CONTROL);
	mmr_image = (mmr_image & UV_SA_MASK) >> UV_SA_SHFT;
	if (mmr_image & (1L << UV2_ACK_UNITS_SHFT))

	mult1 = mmr_image & UV2_ACK_MASK;
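
/* Seed every cpu's bau_control with the current global tunable values. */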
static void __init init_per_cpu_tunables(void)

	struct bau_control *bcp;

	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);
		bcp->baudisabled	= 0;

		bcp->statp		= &per_cpu(ptcstats, cpu);
		/* time interval to catch a hardware stay-busy bug */
		bcp->timeout_interval	= usec_2_cycles(2*timeout_us);
		bcp->max_concurr	= max_concurr;
		bcp->max_concurr_const	= max_concurr;
		bcp->plugged_delay	= plugged_delay;
		bcp->plugsb4reset	= plugsb4reset;
		bcp->timeoutsb4reset	= timeoutsb4reset;
		bcp->ipi_reset_limit	= ipi_reset_limit;
		bcp->complete_threshold	= complete_threshold;
		bcp->cong_response_us	= congested_respns_us;
		bcp->cong_reps		= congested_reps;
		bcp->disabled_period	= sec_2_cycles(disabled_period);
		bcp->giveup_limit	= giveup_limit;
		spin_lock_init(&bcp->queue_lock);
		spin_lock_init(&bcp->uvhub_lock);
		spin_lock_init(&bcp->disable_lock);
/*
 * Scan all cpus to collect blade and socket summaries.
 */
static int __init get_cpu_topology(int base_pnode,
				   struct uvhub_desc *uvhub_descs,
				   unsigned char *uvhub_mask)

	struct bau_control *bcp;
	struct uvhub_desc *bdp;
	struct socket_desc *sdp;

	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);

		memset(bcp, 0, sizeof(struct bau_control));

		pnode = uv_cpu_hub_info(cpu)->pnode;
		if ((pnode - base_pnode) >= UV_DISTRIBUTION_SIZE) {
			pr_emerg(
				"cpu %d pnode %d-%d beyond %d; BAU disabled\n",
				cpu, pnode, base_pnode, UV_DISTRIBUTION_SIZE);

		bcp->osnode = cpu_to_node(cpu);
		bcp->partition_base_pnode = base_pnode;

		uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
		*(uvhub_mask + (uvhub/8)) |= (1 << (uvhub%8));
		bdp = &uvhub_descs[uvhub];

		/* kludge: 'assuming' one node per socket, and assuming that
		   disabling a socket just leaves a gap in node numbers */
		socket = bcp->osnode & 1;
		bdp->socket_mask |= (1 << socket);
		sdp = &bdp->socket[socket];
		sdp->cpu_number[sdp->num_cpus] = cpu;

		if (sdp->num_cpus > MAX_CPUS_PER_SOCKET) {
			pr_emerg("%d cpus per socket invalid\n",
/*
 * Each socket is to get a local array of pnodes/hubs.
 */
static void make_per_cpu_thp(struct bau_control *smaster)

	size_t hpsz = sizeof(struct hub_and_pnode) * num_possible_cpus();

	smaster->thp = kmalloc_node(hpsz, GFP_KERNEL, smaster->osnode);
	memset(smaster->thp, 0, hpsz);
	for_each_present_cpu(cpu) {
		smaster->thp[cpu].pnode = uv_cpu_hub_info(cpu)->pnode;
		smaster->thp[cpu].uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;

/*
 * Each uvhub is to get a local cpumask.
 */
static void make_per_hub_cpumask(struct bau_control *hmaster)
{
	int sz = sizeof(cpumask_t);

	hmaster->cpumask = kzalloc_node(sz, GFP_KERNEL, hmaster->osnode);
}
/*
 * Initialize all the per_cpu information for the cpu's on a given socket,
 * given what has been gathered into the socket_desc struct.
 * And reports the chosen hub and socket masters back to the caller.
 */
static int scan_sock(struct socket_desc *sdp, struct uvhub_desc *bdp,
			struct bau_control **smasterp,
			struct bau_control **hmasterp)

	struct bau_control *bcp;

	for (i = 0; i < sdp->num_cpus; i++) {
		cpu = sdp->cpu_number[i];
		bcp = &per_cpu(bau_control, cpu);

		bcp->cpus_in_uvhub	= bdp->num_cpus;
		bcp->cpus_in_socket	= sdp->num_cpus;
		bcp->socket_master	= *smasterp;
		bcp->uvhub		= bdp->uvhub;
		if (is_uv1_hub())
			bcp->uvhub_version = 1;
		else if (is_uv2_hub())
			bcp->uvhub_version = 2;
		else if (is_uv3_hub())
			bcp->uvhub_version = 3;
		else if (is_uv4_hub())
			bcp->uvhub_version = 4;
		else {
			pr_emerg("uvhub version not 1, 2, 3, or 4\n");

		bcp->uvhub_master = *hmasterp;
		bcp->uvhub_cpu = uv_cpu_blade_processor_id(cpu);

		if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) {
			pr_emerg("%d cpus per uvhub invalid\n",
/*
 * Summarize the blade and socket topology into the per_cpu structures.
 */
static int __init summarize_uvhub_sockets(int nuvhubs,
			struct uvhub_desc *uvhub_descs,
			unsigned char *uvhub_mask)

	unsigned short socket_mask;

	for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
		struct uvhub_desc *bdp;
		struct bau_control *smaster = NULL;
		struct bau_control *hmaster = NULL;

		if (!(*(uvhub_mask + (uvhub/8)) & (1 << (uvhub%8))))

		bdp = &uvhub_descs[uvhub];
		socket_mask = bdp->socket_mask;

		while (socket_mask) {
			struct socket_desc *sdp;
			if ((socket_mask & 1)) {
				sdp = &bdp->socket[socket];
				if (scan_sock(sdp, bdp, &smaster, &hmaster))

				make_per_cpu_thp(smaster);

			socket_mask = (socket_mask >> 1);

		make_per_hub_cpumask(hmaster);
/*
 * initialize the bau_control structure for each cpu
 */
static int __init init_per_cpu(int nuvhubs, int base_part_pnode)

	unsigned char *uvhub_mask;
	struct uvhub_desc *uvhub_descs;

	if (is_uv3_hub() || is_uv2_hub() || is_uv1_hub())
		timeout_us = calculate_destination_timeout();

	vp = kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
	uvhub_descs = (struct uvhub_desc *)vp;
	memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));
	uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL);

	if (get_cpu_topology(base_part_pnode, uvhub_descs, uvhub_mask))

	if (summarize_uvhub_sockets(nuvhubs, uvhub_descs, uvhub_mask))

	init_per_cpu_tunables();
/*
 * Initialization of BAU-related structures
 */
static int __init uv_bau_init(void)

	cpumask_var_t *mask;

	if (!is_uv_system())

	if (is_uv4_hub())
		ops = uv4_bau_ops;
	else if (is_uv3_hub())
		ops = uv123_bau_ops;
	else if (is_uv2_hub())
		ops = uv123_bau_ops;
	else if (is_uv1_hub())
		ops = uv123_bau_ops;

	for_each_possible_cpu(cur_cpu) {
		mask = &per_cpu(uv_flush_tlb_mask, cur_cpu);
		zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cur_cpu));

	nuvhubs = uv_num_possible_blades();
	congested_cycles = usec_2_cycles(congested_respns_us);

	uv_base_pnode = 0x7fffffff;
	for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
		cpus = uv_blade_nr_possible_cpus(uvhub);
		if (cpus && (uv_blade_to_pnode(uvhub) < uv_base_pnode))
			uv_base_pnode = uv_blade_to_pnode(uvhub);

	/* software timeouts are not supported on UV4 */
	if (is_uv3_hub() || is_uv2_hub() || is_uv1_hub())

	if (init_per_cpu(nuvhubs, uv_base_pnode)) {

	vector = UV_BAU_MESSAGE;
	for_each_possible_blade(uvhub) {
		if (uv_blade_nr_possible_cpus(uvhub))
			init_uvhub(uvhub, vector, uv_base_pnode);

	alloc_intr_gate(vector, uv_bau_message_intr1);

	for_each_possible_blade(uvhub) {
		if (uv_blade_nr_possible_cpus(uvhub)) {

			pnode = uv_blade_to_pnode(uvhub);

			write_gmmr_activation(pnode, val);
			mmr = 1; /* should be 1 to broadcast to both sockets */

			write_mmr_data_broadcast(pnode, mmr);

core_initcall(uv_bau_init);
fs_initcall(uv_ptc_init);