git.ipfire.org Git - thirdparty/linux.git/commitdiff
ipv6: udp: fix typos in comments
author Alok Tiwari <alok.a.tiwari@oracle.com>
Tue, 9 Sep 2025 12:26:07 +0000 (05:26 -0700)
committer Jakub Kicinski <kuba@kernel.org>
Fri, 12 Sep 2025 01:41:58 +0000 (18:41 -0700)
Correct typos in ipv6/udp.c comments:
"execeeds" -> "exceeds"
"tacking care" -> "taking care"
"measureable" -> "measurable"

No functional changes.

Signed-off-by: Alok Tiwari <alok.a.tiwari@oracle.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Link: https://patch.msgid.link/20250909122611.3711859-1-alok.a.tiwari@oracle.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
net/ipv6/udp.c

index a35ee6d693a8080b9009f61d23fafd2465b8c625..b70369f3cd3223cfde07556b1cb1636e8bc78d49 100644
@@ -260,7 +260,7 @@ rescore:
 
                        /* compute_score is too long of a function to be
                         * inlined, and calling it again here yields
-                        * measureable overhead for some
+                        * measurable overhead for some
                         * workloads. Work around it by jumping
                         * backwards to rescore 'result'.
                         */
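For context on the comment in this hunk: compute_score() is kept with a single call site because it is too large to be inlined, so when a substituted candidate needs scoring the lookup loop jumps backwards to the rescore label instead of calling the helper from a second location. The following standalone C sketch uses invented names and data, not the kernel's actual lookup; it only illustrates that backwards-goto pattern.

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for a scoring helper that is too big to be inlined; in the
     * real code compute_score() in net/ipv6/udp.c plays this role. */
    static int compute_score(int candidate, int key)
    {
            if (candidate == key)
                    return 2;
            return ((candidate ^ key) & 1) ? 0 : 1;
    }

    /* Invented selection step that can swap in a different candidate which
     * then needs a score of its own. */
    static int select_alternate(int candidate)
    {
            return candidate + 1;
    }

    static int lookup(const int *table, int n, int key)
    {
            int best = -1, badness = -1;

            for (int i = 0; i < n; i++) {
                    bool need_rescore = false;
                    int candidate = table[i];
                    int alternate = -1;
                    int score;
    rescore:
                    score = compute_score(need_rescore ? alternate : candidate, key);
                    if (score > badness) {
                            badness = score;
                            best = need_rescore ? alternate : candidate;
                            if (need_rescore)
                                    continue;
                            /* Rather than calling compute_score() again here
                             * (a second call site for a large, non-inlined
                             * function), jump backwards so it keeps a single
                             * caller. */
                            alternate = select_alternate(candidate);
                            need_rescore = true;
                            goto rescore;
                    }
            }
            return best;
    }

    int main(void)
    {
            const int table[] = { 4, 9, 12, 7 };

            printf("best=%d\n", lookup(table, 4, 7));
            return 0;
    }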
@@ -449,7 +449,7 @@ struct sock *udp6_lib_lookup(const struct net *net, const struct in6_addr *saddr
 EXPORT_SYMBOL_GPL(udp6_lib_lookup);
 #endif
 
-/* do not use the scratch area len for jumbogram: their length execeeds the
+/* do not use the scratch area len for jumbogram: their length exceeds the
  * scratch area space; note that the IP6CB flags is still in the first
  * cacheline, so checking for jumbograms is cheap
  */
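The comment in this hunk refers to the fact that an IPv6 jumbogram (RFC 2675) carries a payload longer than 65535 bytes, so it cannot be represented by the narrow cached length in the skb scratch area, while the jumbogram flag in IP6CB sits in an already-hot cacheline and is cheap to test. Below is a rough standalone C sketch of that idea, with hypothetical structure and field names rather than the kernel's:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical packet descriptor: a small scratch field caches a 16-bit
     * length, while 'flags' (playing the role of IP6CB) records whether the
     * packet is a jumbogram whose payload does not fit in 16 bits. */
    struct pkt {
            uint32_t flags;          /* cheap to test */
            uint32_t len;            /* authoritative full length */
            uint16_t scratch_len;    /* cached length; too narrow for jumbograms */
    };

    #define PKT_JUMBOGRAM 0x1

    /* Trust the cached scratch length only for non-jumbo packets; otherwise
     * fall back to the full length field. */
    static uint32_t pkt_udp_len(const struct pkt *p)
    {
            return (p->flags & PKT_JUMBOGRAM) ? p->len : p->scratch_len;
    }

    int main(void)
    {
            struct pkt normal = { .flags = 0, .len = 1200, .scratch_len = 1200 };
            struct pkt jumbo  = { .flags = PKT_JUMBOGRAM, .len = 70000, .scratch_len = 0 };

            printf("normal=%u jumbo=%u\n",
                   (unsigned)pkt_udp_len(&normal), (unsigned)pkt_udp_len(&jumbo));
            return 0;
    }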
@@ -1048,7 +1048,7 @@ static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
                sk->sk_rx_dst_cookie = rt6_get_cookie(dst_rt6_info(dst));
 }
 
-/* wrapper for udp_queue_rcv_skb tacking care of csum conversion and
+/* wrapper for udp_queue_rcv_skb taking care of csum conversion and
  * return code conversion for ip layer consumption
  */
 static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
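The comment corrected in this last hunk describes udp6_unicast_rcv_skb() as a thin wrapper: it lets the checksum-conversion step run, queues the skb, and then converts the queueing result into something the IP layer can consume. The standalone C sketch below uses invented helper names and assumes, for illustration only, that a positive return value means "resubmit" while errors are absorbed; it is not the kernel's code.

    #include <stdio.h>

    /* Invented stand-ins; only the wrapper's shape matters here. */
    #define RESUBMIT      1     /* positive: feed the packet back in */
    #define QUEUE_ERR   (-1)    /* negative: queueing failed */

    static int queue_rcv(int outcome)
    {
            return outcome;     /* pretend this is the queueing routine's verdict */
    }

    /* Return-code conversion: a positive value is passed through, anything
     * else is collapsed to 0 so the caller treats the packet as consumed. */
    static int unicast_rcv(int outcome)
    {
            int ret = queue_rcv(outcome);

            if (ret > 0)
                    return ret;
            return 0;
    }

    int main(void)
    {
            printf("ok=%d err=%d resubmit=%d\n",
                   unicast_rcv(0), unicast_rcv(QUEUE_ERR), unicast_rcv(RESUBMIT));
            return 0;
    }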