-# Copyright (c) 2022-2023, PostgreSQL Global Development Group
+# Copyright (c) 2022-2024, PostgreSQL Global Development Group
# Entry point for building PostgreSQL with meson
#
project('postgresql',
['c'],
- version: '16devel',
+ version: '17beta1',
license: 'PostgreSQL',
# We want < 0.56 for python 3.5 compatibility on old platforms. EPEL for
default_options: [
'warning_level=1', #-Wall equivalent
'b_pch=false',
- 'buildtype=release',
+ 'buildtype=debugoptimized', # -O2 + debug
# For compatibility with the autoconf build, set a default prefix. This
# works even on windows, where it's a drive-relative path (i.e. when on
# d:/somepath it'll install to d:/usr/local/pgsql)
@0@
****'''
if fs.exists(meson.current_source_dir() / 'src' / 'include' / 'pg_config.h')
- errmsg_cleanup = 'To clean up, run make maintainer-clean in the source tree.'
+ errmsg_cleanup = 'To clean up, run make distclean in the source tree.'
error(errmsg_nonclean_base.format(errmsg_cleanup))
endif
cdata.set('PG_MAJORVERSION_NUM', pg_version_major)
cdata.set('PG_MINORVERSION_NUM', pg_version_minor)
cdata.set('PG_VERSION_NUM', pg_version_num)
-# PG_VERSION_STR is built later, it depends compiler test results
+# PG_VERSION_STR is built later, it depends on compiler test results
cdata.set_quoted('CONFIGURE_ARGS', '')
# Basic platform specific configuration
###############################################################
-# meson's system names don't quite map to our "traditional" names. In some
-# places we need the "traditional" name, e.g., for mapping
-# src/include/port/$os.h to src/include/pg_config_os.h. Define portname for
-# that purpose.
-portname = host_system
-
exesuffix = '' # overridden below where necessary
dlsuffix = '.so' # overridden below where necessary
library_path_var = 'LD_LIBRARY_PATH'
if host_system == 'dragonfly'
# apparently the most similar
host_system = 'netbsd'
+elif host_system == 'android'
+ # while android isn't quite a normal linux, it seems close enough
+ # for our purposes so far
+ host_system = 'linux'
endif
-if host_system == 'aix'
- library_path_var = 'LIBPATH'
-
- export_file_format = 'aix'
- export_fmt = '-Wl,-bE:@0@'
- mod_link_args_fmt = ['-Wl,-bI:@0@']
- mod_link_with_dir = 'libdir'
- mod_link_with_name = '@0@.imp'
-
- # M:SRE sets a flag indicating that an object is a shared library. Seems to
- # work in some circumstances without, but required in others.
- ldflags_sl += '-Wl,-bM:SRE'
- ldflags_be += '-Wl,-brtllib'
-
- # Native memset() is faster, tested on:
- # - AIX 5.1 and 5.2, XLC 6.0 (IBM's cc)
- # - AIX 5.3 ML3, gcc 4.0.1
- memset_loop_limit = 0
+# meson's system names don't quite map to our "traditional" names. In some
+# places we need the "traditional" name, e.g., for mapping
+# src/include/port/$os.h to src/include/pg_config_os.h. Define portname for
+# that purpose.
+portname = host_system
-elif host_system == 'cygwin'
+if host_system == 'cygwin'
sema_kind = 'unnamed_posix'
cppflags += '-D_GNU_SOURCE'
dlsuffix = '.dll'
library_path_var = 'DYLD_LIBRARY_PATH'
export_file_format = 'darwin'
- export_fmt = '-exported_symbols_list=@0@'
+ export_fmt = '-Wl,-exported_symbols_list,@0@'
mod_link_args_fmt = ['-bundle_loader', '@0@']
mod_link_with_dir = 'bindir'
sysroot_args = [files('src/tools/darwin_sysroot'), get_option('darwin_sysroot')]
pg_sysroot = run_command(sysroot_args, check:true).stdout().strip()
message('darwin sysroot: @0@'.format(pg_sysroot))
- cflags += ['-isysroot', pg_sysroot]
- ldflags += ['-isysroot', pg_sysroot]
+ if pg_sysroot != ''
+ cflags += ['-isysroot', pg_sysroot]
+ ldflags += ['-isysroot', pg_sysroot]
+ endif
+
# meson defaults to -Wl,-undefined,dynamic_lookup for modules, which we
# don't want because a) it's different from what we do for autoconf, b) it
- # causes warnings starting in macOS Ventura
- ldflags_mod += ['-Wl,-undefined,error']
+ # causes warnings in macOS Ventura. But using -Wl,-undefined,error causes a
+ # warning starting in Sonoma. So only add -Wl,-undefined,error if it does
+ # not cause a warning.
+ if cc.has_multi_link_arguments('-Wl,-undefined,error', '-Werror')
+ ldflags_mod += '-Wl,-undefined,error'
+ endif
+
+  # Starting in Sonoma, the linker warns about the same library being
+  # linked twice, which can easily happen when multiple dependencies
+  # depend on the same library. Quiesce the ill-considered warning.
+ ldflags += cc.get_supported_link_arguments('-Wl,-no_warn_duplicate_libraries')
elif host_system == 'freebsd'
sema_kind = 'unnamed_posix'
python = find_program(get_option('PYTHON'), required: true, native: true)
flex = find_program(get_option('FLEX'), native: true, version: '>= 2.5.35')
bison = find_program(get_option('BISON'), native: true, version: '>= 2.3')
-sed = find_program(get_option('SED'), 'sed', native: true)
+sed = find_program(get_option('SED'), 'sed', native: true, required: false)
prove = find_program(get_option('PROVE'), native: true, required: false)
-tar = find_program(get_option('TAR'), native: true)
-gzip = find_program(get_option('GZIP'), native: true)
+tar = find_program(get_option('TAR'), native: true, required: false)
+gzip = find_program(get_option('GZIP'), native: true, required: false)
program_lz4 = find_program(get_option('LZ4'), native: true, required: false)
openssl = find_program(get_option('OPENSSL'), native: true, required: false)
program_zstd = find_program(get_option('ZSTD'), native: true, required: false)
dtrace = find_program(get_option('DTRACE'), native: true, required: get_option('dtrace'))
missing = find_program('config/missing', native: true)
cp = find_program('cp', required: false, native: true)
+xmllint_bin = find_program(get_option('XMLLINT'), native: true, required: false)
+xsltproc_bin = find_program(get_option('XSLTPROC'), native: true, required: false)
bison_flags = []
if bison.found()
}
flex_flags = []
+if flex.found()
+ flex_version_c = run_command(flex, '--version', check: true)
+ flex_version = flex_version_c.stdout().split(' ')[1].split('\n')[0]
+endif
flex_wrapper = files('src/tools/pgflex')
flex_cmd = [python, flex_wrapper,
'--builddir', '@BUILD_ROOT@',
# https://github.com/mesonbuild/meson/issues/8511
meson_binpath_r = run_command(python, 'src/tools/find_meson', check: true)
-if meson_binpath_r.returncode() != 0 or meson_binpath_r.stdout() == ''
+if meson_binpath_r.stdout() == ''
error('huh, could not run find_meson.\nerrcode: @0@\nstdout: @1@\nstderr: @2@'.format(
meson_binpath_r.returncode(),
meson_binpath_r.stdout(),
###############################################################
cdata.set('USE_ASSERT_CHECKING', get_option('cassert') ? 1 : false)
+cdata.set('USE_INJECTION_POINTS', get_option('injection_points') ? 1 : false)
blocksize = get_option('blocksize').to_int() * 1024
dir_man = get_option('mandir')
# FIXME: These used to be separately configurable - worth adding?
-dir_doc = get_option('datadir') / 'doc' / 'postgresql'
-dir_doc_html = dir_doc
+dir_doc = get_option('datadir') / 'doc'
+if not (dir_prefix_contains_pg or dir_doc.contains('pgsql') or dir_doc.contains('postgres'))
+ dir_doc = dir_doc / pkg
+endif
+dir_doc_html = dir_doc / 'html'
dir_locale = get_option('localedir')
dir_include_server = dir_include_pkg / 'server'
dir_include_extension = dir_include_server / 'extension'
dir_data_extension = dir_data / 'extension'
+dir_doc_extension = dir_doc / 'extension'
###############################################################
bonjouropt = get_option('bonjour')
-bonjour = dependency('', required : false)
+bonjour = not_found_dep
if cc.check_header('dns_sd.h', required: bonjouropt,
args: test_c_args, include_directories: postgres_inc) and \
cc.has_function('DNSServiceRegister',
+###############################################################
+# Option: docs in HTML and man page format
+###############################################################
+
+docs_opt = get_option('docs')
+docs_dep = not_found_dep
+if not docs_opt.disabled()
+ if xmllint_bin.found() and xsltproc_bin.found()
+ docs_dep = declare_dependency()
+ elif docs_opt.enabled()
+ error('missing required tools (xmllint and xsltproc needed) for docs in HTML / man page format')
+ endif
+endif
+
+
+
+###############################################################
+# Option: docs in PDF format
+###############################################################
+
+docs_pdf_opt = get_option('docs_pdf')
+docs_pdf_dep = not_found_dep
+if not docs_pdf_opt.disabled()
+ fop = find_program(get_option('FOP'), native: true, required: docs_pdf_opt)
+ if xmllint_bin.found() and xsltproc_bin.found() and fop.found()
+ docs_pdf_dep = declare_dependency()
+ elif docs_pdf_opt.enabled()
+ error('missing required tools for docs in PDF format')
+ endif
+endif
+
+
+
###############################################################
# Library: GSSAPI
###############################################################
endif
if not have_gssapi
- elif cc.has_function('gss_init_sec_context', dependencies: gssapi,
+ elif cc.check_header('gssapi/gssapi_ext.h', dependencies: gssapi, required: false,
+ args: test_c_args, include_directories: postgres_inc)
+ cdata.set('HAVE_GSSAPI_GSSAPI_EXT_H', 1)
+ elif cc.check_header('gssapi_ext.h', args: test_c_args, dependencies: gssapi, required: gssapiopt)
+ cdata.set('HAVE_GSSAPI_EXT_H', 1)
+ else
+ have_gssapi = false
+ endif
+
+ if not have_gssapi
+ elif cc.has_function('gss_store_cred_into', dependencies: gssapi,
args: test_c_args, include_directories: postgres_inc)
cdata.set('ENABLE_GSS', 1)
krb_srvtab = 'FILE:/@0@/krb5.keytab)'.format(get_option('sysconfdir'))
cdata.set_quoted('PG_KRB_SRVTAB', krb_srvtab)
elif gssapiopt.enabled()
- error('''could not find function 'gss_init_sec_context' required for GSSAPI''')
+ error('''could not find function 'gss_store_cred_into' required for GSSAPI''')
else
have_gssapi = false
endif
endif
endif
- # XXX: this shouldn't be tested in the windows case, but should be tested in
- # the dependency() success case
if ldap.found() and cc.has_function('ldap_initialize',
dependencies: ldap, args: test_c_args)
cdata.set('HAVE_LDAP_INITIALIZE', 1)
###############################################################
llvmopt = get_option('llvm')
-if not llvmopt.disabled()
- add_languages('cpp', required: true, native: false)
- llvm = dependency('llvm', version: '>=3.9', method: 'config-tool', required: llvmopt)
+llvm = not_found_dep
+if add_languages('cpp', required: llvmopt, native: false)
+ llvm = dependency('llvm', version: '>=10', method: 'config-tool', required: llvmopt)
if llvm.found()
ccache = find_program('ccache', native: true, required: false)
clang = find_program(llvm_binpath / 'clang', required: true)
endif
-else
- llvm = not_found_dep
+elif llvmopt.auto()
+ message('llvm requires a C++ compiler')
endif
icuopt = get_option('icu')
if not icuopt.disabled()
- icu = dependency('icu-uc', required: icuopt.enabled())
- icu_i18n = dependency('icu-i18n', required: icuopt.enabled())
+ icu = dependency('icu-uc', required: icuopt)
+ icu_i18n = dependency('icu-i18n', required: icuopt)
if icu.found()
cdata.set('USE_ICU', 1)
###############################################################
pyopt = get_option('plpython')
+python3_dep = not_found_dep
if not pyopt.disabled()
pm = import('python')
- python3_inst = pm.find_installation(required: pyopt.enabled())
- python3_dep = python3_inst.dependency(embed: true, required: pyopt.enabled())
- if not cc.check_header('Python.h', dependencies: python3_dep, required: pyopt.enabled())
- python3_dep = not_found_dep
+ python3_inst = pm.find_installation(python.path(), required: pyopt)
+ if python3_inst.found()
+ python3_dep = python3_inst.dependency(embed: true, required: pyopt)
+ # Remove this check after we depend on Meson >= 1.1.0
+ if not cc.check_header('Python.h', dependencies: python3_dep, required: pyopt)
+ python3_dep = not_found_dep
+ endif
endif
-else
- python3_dep = not_found_dep
endif
readline = dependency(readline_dep, required: false)
if not readline.found()
readline = cc.find_library(readline_dep,
- required: get_option('readline').enabled(),
+ required: get_option('readline'),
dirs: test_lib_d)
endif
if readline.found()
if not at_least_one_header_found
error('''readline header not found
-If you have @0@ already installed, see meson-log/meson-log.txt for details on the
+If you have @0@ already installed, see meson-logs/meson-log.txt for details on the
failure. It is possible the compiler isn't looking in the proper directory.
-Use -Dreadline=false to disable readline support.'''.format(readline_dep))
+Use -Dreadline=disabled to disable readline support.'''.format(readline_dep))
endif
check_funcs = [
foreach func : check_funcs
found = cc.has_function(func, dependencies: [readline],
args: test_c_args, include_directories: postgres_inc)
- cdata.set('HAVE_'+func.to_upper(), found ? 1 : false)
+ cdata.set('HAVE_' + func.to_upper(), found ? 1 : false)
endforeach
check_vars = [
]
foreach var : check_vars
- cdata.set('HAVE_'+var.to_upper(),
+ cdata.set('HAVE_' + var.to_upper(),
cc.has_header_symbol(readline_h, var,
args: test_c_args, include_directories: postgres_inc,
prefix: '#include <stdio.h>',
['CRYPTO_new_ex_data', {'required': true}],
['SSL_new', {'required': true}],
- # Function introduced in OpenSSL 1.0.2.
- ['X509_get_signature_nid'],
+ # Function introduced in OpenSSL 1.0.2, not in LibreSSL.
+ ['SSL_CTX_set_cert_cb'],
# Functions introduced in OpenSSL 1.1.0. We used to check for
# OPENSSL_VERSION_NUMBER, but that didn't work with 1.1.0, because LibreSSL
# doesn't have these OpenSSL 1.1.0 functions. So check for individual
# functions.
['OPENSSL_init_ssl'],
- ['BIO_get_data'],
['BIO_meth_new'],
['ASN1_STRING_get0_data'],
['HMAC_CTX_new'],
if are_openssl_funcs_complete
cdata.set('USE_OPENSSL', 1,
description: 'Define to 1 to build with OpenSSL support. (-Dssl=openssl)')
- cdata.set('OPENSSL_API_COMPAT', '0x10001000L',
- description: '''Define to the OpenSSL API version in use. This avoids deprecation warnings from newer OpenSSL versions.''')
+ cdata.set('OPENSSL_API_COMPAT', '0x10002000L',
+ description: 'Define to the OpenSSL API version in use. This avoids deprecation warnings from newer OpenSSL versions.')
ssl_library = 'openssl'
else
ssl = not_found_dep
uuidfunc = 'uuid_export'
uuidheader = 'uuid.h'
else
- error('huh')
+ error('unknown uuid build option value: @0@'.format(uuidopt))
endif
- if not cc.has_header_symbol(uuidheader, uuidfunc, args: test_c_args, dependencies: uuid)
+ if not cc.has_header_symbol(uuidheader, uuidfunc,
+ args: test_c_args,
+ include_directories: postgres_inc,
+ dependencies: uuid)
error('uuid library @0@ missing required function @1@'.format(uuidopt, uuidfunc))
endif
cdata.set('HAVE_@0@'.format(uuidheader.underscorify().to_upper()), 1)
warning('did not find zlib')
elif not cc.has_header('zlib.h',
args: test_c_args, include_directories: postgres_inc,
- dependencies: [zlib_t], required: zlibopt.enabled())
+ dependencies: [zlib_t], required: zlibopt)
warning('zlib header not found')
- elif not cc.has_type('z_streamp',
- dependencies: [zlib_t], prefix: '#include <zlib.h>',
- args: test_c_args, include_directories: postgres_inc)
- if zlibopt.enabled()
- error('zlib version is too old')
- else
- warning('zlib version is too old')
- endif
else
zlib = zlib_t
endif
cdata.set('SIZEOF_LONG', sizeof_long)
if sizeof_long == 8
cdata.set('HAVE_LONG_INT_64', 1)
- cdata.set('PG_INT64_TYPE', 'long int')
+ pg_int64_type = 'long int'
cdata.set_quoted('INT64_MODIFIER', 'l')
elif sizeof_long == 4 and cc.sizeof('long long', args: test_c_args) == 8
cdata.set('HAVE_LONG_LONG_INT_64', 1)
- cdata.set('PG_INT64_TYPE', 'long long int')
+ pg_int64_type = 'long long int'
cdata.set_quoted('INT64_MODIFIER', 'll')
else
error('do not know how to get a 64bit int')
endif
+cdata.set('PG_INT64_TYPE', pg_int64_type)
if host_machine.endian() == 'big'
cdata.set('WORDS_BIGENDIAN', 1)
endif
+# Determine memory alignment requirements for the basic C data types.
+
alignof_types = ['short', 'int', 'long', 'double']
-maxalign = 0
foreach t : alignof_types
align = cc.alignment(t, args: test_c_args)
- if maxalign < align
- maxalign = align
- endif
cdata.set('ALIGNOF_@0@'.format(t.to_upper()), align)
endforeach
-cdata.set('MAXIMUM_ALIGNOF', maxalign)
+
+# Compute maximum alignment of any basic type.
+#
+# We require 'double' to have the strictest alignment among the basic types,
+# because otherwise the C ABI might impose 8-byte alignment on some of the
+# other C types that correspond to TYPALIGN_DOUBLE SQL types. That could
+# cause a mismatch between the tuple layout and the C struct layout of a
+# catalog tuple. We used to carefully order catalog columns such that any
+# fixed-width, attalign=4 columns were at offsets divisible by 8 regardless
+# of MAXIMUM_ALIGNOF to avoid that, but we no longer support any platforms
+# where TYPALIGN_DOUBLE != MAXIMUM_ALIGNOF.
+#
+# We assume without checking that int64's alignment is at least as strong
+# as long, char, short, or int. Note that we intentionally do not consider
+# any types wider than 64 bits, as allowing MAXIMUM_ALIGNOF to exceed 8
+# would be too much of a penalty for disk and memory space.
+alignof_double = cdata.get('ALIGNOF_DOUBLE')
+if cc.alignment(pg_int64_type, args: test_c_args) > alignof_double
+ error('alignment of int64 is greater than the alignment of double')
+endif
+cdata.set('MAXIMUM_ALIGNOF', alignof_double)
cdata.set('SIZEOF_VOID_P', cc.sizeof('void *', args: test_c_args))
cdata.set('SIZEOF_SIZE_T', cc.sizeof('size_t', args: test_c_args))
if not meson.is_cross_build()
r = cc.run('''
/* This must match the corresponding code in c.h: */
- #if defined(__GNUC__) || defined(__SUNPRO_C) || defined(__IBMC__)
+ #if defined(__GNUC__) || defined(__SUNPRO_C)
#define pg_attribute_aligned(a) __attribute__((aligned(a)))
#elif defined(_MSC_VER)
#define pg_attribute_aligned(a) __declspec(align(a))
if not buggy_int128
cdata.set('PG_INT128_TYPE', '__int128')
- cdata.set('ALIGNOF_PG_INT128_TYPE', cc.
- alignment('__int128', args: test_c_args))
+ cdata.set('ALIGNOF_PG_INT128_TYPE', cc.alignment('__int128', args: test_c_args))
endif
endif
# We use <stdbool.h> if we have it and it declares type bool as having
# size 1. Otherwise, c.h will fall back to declaring bool as unsigned char.
if cc.has_type('_Bool', args: test_c_args) \
- and cc.has_type('bool', prefix: '#include <stdbool.h>', args: test_c_args) \
- and cc.sizeof('bool', prefix: '#include <stdbool.h>', args: test_c_args) == 1
+ and cc.has_type('bool', prefix: '#include <stdbool.h>', args: test_c_args) \
+ and cc.sizeof('bool', prefix: '#include <stdbool.h>', args: test_c_args) == 1
cdata.set('HAVE__BOOL', 1)
cdata.set('PG_USE_STDBOOL', 1)
endif
if cc.has_function_attribute('visibility:default') and \
- cc.has_function_attribute('visibility:hidden')
+ cc.has_function_attribute('visibility:hidden')
cdata.set('HAVE_VISIBILITY_ATTRIBUTE', 1)
# Only newer versions of meson know not to apply gnu_symbol_visibility =
- # inlineshidden to C code as well... Any either way, we want to put these
+ # inlineshidden to C code as well... And either way, we want to put these
# flags into exported files (pgxs, .pc files).
cflags_mod += '-fvisibility=hidden'
cxxflags_mod += ['-fvisibility=hidden', '-fvisibility-inlines-hidden']
endif
+# Check for __get_cpuid_count() and __cpuidex() in a similar fashion.
+if cc.links('''
+ #include <cpuid.h>
+ int main(int arg, char **argv)
+ {
+ unsigned int exx[4] = {0, 0, 0, 0};
+ __get_cpuid_count(7, 0, &exx[0], &exx[1], &exx[2], &exx[3]);
+ }
+ ''', name: '__get_cpuid_count',
+ args: test_c_args)
+ cdata.set('HAVE__GET_CPUID_COUNT', 1)
+elif cc.links('''
+ #include <intrin.h>
+ int main(int arg, char **argv)
+ {
+ unsigned int exx[4] = {0, 0, 0, 0};
+ __cpuidex(exx, 7, 0);
+ }
+ ''', name: '__cpuidex',
+ args: test_c_args)
+ cdata.set('HAVE__CPUIDEX', 1)
+endif
+
+
# Defend against clang being used on x86-32 without SSE2 enabled. As current
# versions of clang do not understand -fexcess-precision=standard, the use of
# x87 floating point operations leads to problems like isinf possibly returning
endforeach
-# From Project.pm
if cc.get_id() == 'msvc'
cflags_warn += [
'/wd4018', # signed/unsigned mismatch
endif
+###############################################################
+# Check for the availability of XSAVE intrinsics.
+###############################################################
+
+cflags_xsave = []
+if host_cpu == 'x86' or host_cpu == 'x86_64'
+
+ prog = '''
+#include <immintrin.h>
+
+int main(void)
+{
+ return _xgetbv(0) & 0xe0;
+}
+'''
+
+ if cc.links(prog, name: 'XSAVE intrinsics without -mxsave',
+ args: test_c_args)
+ cdata.set('HAVE_XSAVE_INTRINSICS', 1)
+ elif cc.links(prog, name: 'XSAVE intrinsics with -mxsave',
+ args: test_c_args + ['-mxsave'])
+ cdata.set('HAVE_XSAVE_INTRINSICS', 1)
+ cflags_xsave += '-mxsave'
+ endif
+
+endif
+
+
+###############################################################
+# Check for the availability of AVX-512 popcount intrinsics.
+###############################################################
+
+cflags_popcnt = []
+if host_cpu == 'x86_64'
+
+ prog = '''
+#include <immintrin.h>
+
+int main(void)
+{
+ const char buf[sizeof(__m512i)];
+ INT64 popcnt = 0;
+ __m512i accum = _mm512_setzero_si512();
+ const __m512i val = _mm512_maskz_loadu_epi8((__mmask64) 0xf0f0f0f0f0f0f0f0, (const __m512i *) buf);
+ const __m512i cnt = _mm512_popcnt_epi64(val);
+ accum = _mm512_add_epi64(accum, cnt);
+ popcnt = _mm512_reduce_add_epi64(accum);
+ /* return computed value, to prevent the above being optimized away */
+ return popcnt == 0;
+}
+'''
+
+ if cc.links(prog, name: 'AVX-512 popcount without -mavx512vpopcntdq -mavx512bw',
+ args: test_c_args + ['-DINT64=@0@'.format(cdata.get('PG_INT64_TYPE'))])
+ cdata.set('USE_AVX512_POPCNT_WITH_RUNTIME_CHECK', 1)
+ elif cc.links(prog, name: 'AVX-512 popcount with -mavx512vpopcntdq -mavx512bw',
+ args: test_c_args + ['-DINT64=@0@'.format(cdata.get('PG_INT64_TYPE'))] + ['-mavx512vpopcntdq'] + ['-mavx512bw'])
+ cdata.set('USE_AVX512_POPCNT_WITH_RUNTIME_CHECK', 1)
+ cflags_popcnt += ['-mavx512vpopcntdq'] + ['-mavx512bw']
+ endif
+
+endif
+
###############################################################
# Select CRC-32C implementation.
cdata.set('USE_ARMV8_CRC32C_WITH_RUNTIME_CHECK', 1)
have_optimized_crc = true
endif
+
+elif host_cpu == 'loongarch64'
+
+ prog = '''
+int main(void)
+{
+ unsigned int crc = 0;
+ crc = __builtin_loongarch_crcc_w_b_w(0, crc);
+ crc = __builtin_loongarch_crcc_w_h_w(0, crc);
+ crc = __builtin_loongarch_crcc_w_w_w(0, crc);
+ crc = __builtin_loongarch_crcc_w_d_w(0, crc);
+
+ /* return computed value, to prevent the above being optimized away */
+ return crc == 0;
+}
+'''
+
+ if cc.links(prog, name: '__builtin_loongarch_crcc_w_b_w, __builtin_loongarch_crcc_w_h_w, __builtin_loongarch_crcc_w_w_w, and __builtin_loongarch_crcc_w_d_w',
+ args: test_c_args)
+ # Use LoongArch CRC instruction unconditionally
+ cdata.set('USE_LOONGARCH_CRC32C', 1)
+ have_optimized_crc = true
+ endif
+
endif
if not have_optimized_crc
['pwritev', 'sys/uio.h'],
]
+# Check presence of some optional LLVM functions.
+if llvm.found()
+ decl_checks += [
+ ['LLVMCreateGDBRegistrationListener', 'llvm-c/ExecutionEngine.h'],
+ ['LLVMCreatePerfJITEventListener', 'llvm-c/ExecutionEngine.h'],
+ ]
+endif
+
foreach c : decl_checks
func = c.get(0)
header = c.get(1)
cdata.set('STRERROR_R_INT', false)
endif
-# Check for the locale_t type and find the right header file. macOS
-# needs xlocale.h; standard is locale.h, but glibc also has an
-# xlocale.h file that we should not use. MSVC has a replacement
-# defined in src/include/port/win32_port.h.
-if cc.has_type('locale_t', prefix: '#include <locale.h>')
- cdata.set('HAVE_LOCALE_T', 1)
-elif cc.has_type('locale_t', prefix: '#include <xlocale.h>')
- cdata.set('HAVE_LOCALE_T', 1)
+# Find the right header file for the locale_t type. macOS needs xlocale.h;
+# standard is locale.h, but glibc <= 2.25 also had an xlocale.h file that
+# we should not use so we check the standard header first. MSVC has a
+# replacement defined in src/include/port/win32_port.h.
+if not cc.has_type('locale_t', prefix: '#include <locale.h>') and \
+ cc.has_type('locale_t', prefix: '#include <xlocale.h>')
cdata.set('LOCALE_T_IN_XLOCALE', 1)
-elif cc.get_id() == 'msvc'
- cdata.set('HAVE_LOCALE_T', 1)
endif
# Check if the C compiler understands typeof or a variant. Define
# conflict.
#
# We assume C99 support, so we don't need to make this conditional.
-#
-# XXX: Historically we allowed platforms to disable restrict in template
-# files, but that was only added for AIX when building with XLC, which we
-# don't support yet.
cdata.set('pg_restrict', '__restrict')
dl_dep = cc.find_library('dl', required: false)
util_dep = cc.find_library('util', required: false)
-posix4_dep = cc.find_library('posix4', required: false)
getopt_dep = cc.find_library('getopt', required: false)
gnugetopt_dep = cc.find_library('gnugetopt', required: false)
func_checks = [
['_configthreadlocale', {'skip': host_system != 'windows'}],
['backtrace_symbols', {'dependencies': [execinfo_dep]}],
- ['clock_gettime', {'dependencies': [rt_dep, posix4_dep], 'define': false}],
+ ['clock_gettime', {'dependencies': [rt_dep], 'define': false}],
['copyfile'],
+ ['copy_file_range'],
# gcc/clang's sanitizer helper library provides dlopen but not dlsym, thus
# when enabling asan the dlopen check doesn't notice that -ldl is actually
# required. Just checking for dlsym() ought to suffice.
['dlsym', {'dependencies': [dl_dep], 'define': false}],
['explicit_bzero'],
- ['fdatasync', {'dependencies': [rt_dep, posix4_dep], 'define': false}], # Solaris
['getifaddrs'],
['getopt', {'dependencies': [getopt_dep, gnugetopt_dep], 'skip': always_replace_getopt}],
['getopt_long', {'dependencies': [getopt_dep, gnugetopt_dep], 'skip': always_replace_getopt_long}],
['posix_fadvise'],
['posix_fallocate'],
['ppoll'],
- ['pstat'],
['pthread_barrier_wait', {'dependencies': [thread_dep]}],
['pthread_is_threaded_np', {'dependencies': [thread_dep]}],
['sem_init', {'dependencies': [rt_dep, thread_dep], 'skip': sema_kind != 'unnamed_posix', 'define': false}],
endif
-# MSVC has replacements defined in src/include/port/win32_port.h.
-if cc.get_id() == 'msvc'
- cdata.set('HAVE_WCSTOMBS_L', 1)
- cdata.set('HAVE_MBSTOWCS_L', 1)
-endif
-
-
# if prerequisites for unnamed posix semas aren't fulfilled, fall back to sysv
# semaphores
if sema_kind == 'unnamed_posix' and \
)
-
-###############################################################
-# Threading
-###############################################################
-
-# XXX: About to rely on thread safety in the autoconf build, so not worth
-# implementing a fallback.
-cdata.set('ENABLE_THREAD_SAFETY', 1)
-
-
-
###############################################################
# NLS / Gettext
###############################################################
# otherwise there'd be lots of
# "Gettext not found, all translation (po) targets will be ignored."
# warnings if not found.
- msgfmt = find_program('msgfmt', required: nlsopt.enabled(), native: true)
+ msgfmt = find_program('msgfmt', required: nlsopt, native: true)
- # meson 0.59 has this wrapped in dependency('int')
+ # meson 0.59 has this wrapped in dependency('intl')
if (msgfmt.found() and
cc.check_header('libintl.h', required: nlsopt,
args: test_c_args, include_directories: postgres_inc))
pl_targets = []
contrib_targets = []
testprep_targets = []
+nls_targets = []
# Define the tests to distribute them to the correct test styles later
+###
+### Helpers for custom targets used across the tree
+###
+
+catalog_pm = files('src/backend/catalog/Catalog.pm')
+perfect_hash_pm = files('src/tools/PerfectHash.pm')
+gen_kwlist_deps = [perfect_hash_pm]
+gen_kwlist_cmd = [
+ perl, '-I', '@SOURCE_ROOT@/src/tools',
+ files('src/tools/gen_keywordlist.pl'),
+ '--output', '@OUTDIR@', '@INPUT@']
+
+
+
###
### windows resources related stuff
###
testprep_targets += test_install_libs
-# command to install files used for tests, which aren't installed by default
-install_test_files_args = [
- install_files,
- '--prefix', dir_prefix,
- '--install', contrib_data_dir, test_install_data,
- '--install', dir_lib_pkg, test_install_libs,
-]
-
-# Target installing files required for installcheck of various modules
-run_target('install-test-files',
- command: [python] + install_test_files_args,
- depends: testprep_targets,
-)
-
-
# If there are any files in the source directory that we also generate in the
# build directory, they might get preferred over the newly generated files,
# e.g. because of a #include "file", which always will search in the current
foreach t : potentially_conflicting_files_t
potentially_conflicting_files += t.full_path()
endforeach
-foreach t : configure_files
- t = '@0@'.format(t)
+foreach t1 : configure_files
+ if meson.version().version_compare('>=0.59')
+ t = fs.parent(t1) / fs.name(t1)
+ else
+ t = '@0@'.format(t1)
+ endif
potentially_conflicting_files += meson.current_build_dir() / t
endforeach
foreach sub, fnames : generated_sources_ac
+###############################################################
+# Install targets
+###############################################################
+
+
+# We want to define additional install targets beyond what meson provides. For
+# that we need to define targets depending on nearly everything. We collected
+# the results of i18n.gettext() invocations into nls_targets, that also
+# includes maintainer targets though. Collect the ones we want as a dependency.
+#
+# i18n.gettext() doesn't return the dependencies before 0.60 - but the gettext
+# generation happens during install, so that's not a real issue.
+nls_mo_targets = []
+if libintl.found() and meson.version().version_compare('>=0.60')
+ # use range() to avoid the flattening of the list that foreach() would do
+ foreach off : range(0, nls_targets.length())
+    # i18n.gettext() returns a list containing 1) the list of built .mo files
+    # 2) the maintainer -pot target 3) the maintainer -update-po target
+ nls_mo_targets += nls_targets[off][0]
+ endforeach
+ alias_target('nls', nls_mo_targets)
+endif
+
+
+all_built = [
+ backend_targets,
+ bin_targets,
+ libpq_st,
+ pl_targets,
+ contrib_targets,
+ nls_mo_targets,
+ testprep_targets,
+ ecpg_targets,
+]
+
+# Meson's default install target is quite verbose. Provide one that is quiet.
+install_quiet = custom_target('install-quiet',
+ output: 'install-quiet',
+ build_always_stale: true,
+ build_by_default: false,
+ command: [meson_bin, meson_args, 'install', '--quiet', '--no-rebuild'],
+ depends: all_built,
+)
+
+# Target to install files used for tests, which aren't installed by default
+install_test_files_args = [
+ install_files,
+ '--prefix', dir_prefix,
+ '--install', contrib_data_dir, test_install_data,
+ '--install', dir_lib_pkg, test_install_libs,
+]
+run_target('install-test-files',
+ command: [python] + install_test_files_args,
+ depends: testprep_targets,
+)
+
+
+
###############################################################
# Test prep
###############################################################
'muon': []
}[meson_impl]
-# setup tests should be run first,
+# setup tests should be run first,
# so define priority for these
setup_tests_priority = 100
test('tmp_install',
test_env = environment()
temp_install_bindir = test_install_location / get_option('bindir')
+test_initdb_template = meson.build_root() / 'tmp_install' / 'initdb-template'
test_env.set('PG_REGRESS', pg_regress.full_path())
test_env.set('REGRESS_SHLIB', regress_module.full_path())
+test_env.set('INITDB_TEMPLATE', test_initdb_template)
# Test suites that are not safe by default but can be run if selected
# by the user via the whitespace-separated list in variable PG_TEST_EXTRA.
endif
+# Create (and remove old) initdb template directory. Tests use that, where
+# possible, to make it cheaper to run tests.
+#
+# Use python to remove the old cached initdb, as we cannot rely on a working
+# 'rm' binary on windows.
+test('initdb_cache',
+  python,
+  args: [
+    '-c', '''
+import shutil
+import sys
+import subprocess
+
+shutil.rmtree(sys.argv[1], ignore_errors=True)
+sp = subprocess.run(sys.argv[2:] + [sys.argv[1]])
+sys.exit(sp.returncode)
+''',
+  test_initdb_template,
+  temp_install_bindir / 'initdb',
+  '--auth', 'trust', '--no-sync', '--no-instructions', '--lc-messages=C',
+  '--no-clean'
+  ],
+  # Run right after tmp_install (which uses setup_tests_priority), and not in
+  # parallel with other tests, since they consume the template directory this
+  # creates (exported above as INITDB_TEMPLATE).
+  priority: setup_tests_priority - 1,
+  timeout: 300,
+  is_parallel: false,
+  env: test_env,
+  suite: ['setup'])
+
+
###############################################################
# Test Generation
env.prepend('PATH', temp_install_bindir, test_dir['bd'])
test_kwargs = {
+ 'protocol': 'tap',
'priority': 10,
'timeout': 1000,
'depends': test_deps + t.get('deps', []),
testport += 1
elif kind == 'tap'
+ testwrap_tap = testwrap_base
if not tap_tests_enabled
- continue
+ testwrap_tap += ['--skip', 'TAP tests not enabled']
endif
test_command = [
test(test_dir['name'] / onetap_p,
python,
kwargs: test_kwargs,
- args: testwrap_base + [
+ args: testwrap_tap + [
'--testgroup', test_dir['name'],
'--testname', onetap_p,
'--', test_command,
endif
+
###############################################################
# Pseudo targets
###############################################################
alias_target('contrib', contrib_targets)
alias_target('testprep', testprep_targets)
+alias_target('world', all_built, docs)
+alias_target('install-world', install_quiet, installdocs)
+
+# Print the list of available build targets. The perl one-liner skips
+# comment lines (starting with '#') and prints everything else.
+run_target('help',
+  command: [
+    perl, '-ne', 'next if /^#/; print',
+    files('doc/src/sgml/targets-meson.txt'),
+  ]
+)
+
+
+
+###############################################################
+# Distribution archive
+###############################################################
+
+# Meson has its own distribution building command (meson dist), but we
+# are not using that at this point. The main problem is that their
+# implementation is not deterministic. Also, we want it to be equivalent
+# to the "make" version for the time being. The target name "dist" is
+# reserved by meson for that built-in command, so we call our custom
+# target "pgdist".
+
+# disabler: true turns the custom_targets using 'git' below into no-ops when
+# git is not available, instead of failing at configure time.
+git = find_program('git', required: false, native: true, disabler: true)
+bzip2 = find_program('bzip2', required: false, native: true)
+
+# Directory prefix inside the archives, e.g. postgresql-17beta1/
+distdir = meson.project_name() + '-' + meson.project_version()
+
+pg_git_revision = get_option('PG_GIT_REVISION')
+
+# Note: core.autocrlf=false is needed to avoid line-ending conversion
+# in case the environment has a different setting. Without it, a tarball
+# created on Windows might differ from one created on a Unix machine,
+# and be unusable there.
+
+# build_always_stale: the build system cannot know when the git revision
+# changes, so regenerate the archive on every invocation.
+tar_gz = custom_target('tar.gz',
+  build_always_stale: true,
+  command: [git, '-C', '@SOURCE_ROOT@',
+    '-c', 'core.autocrlf=false',
+    'archive',
+    '--format', 'tar.gz',
+    # '-9' selects maximum gzip compression
+    '-9',
+    '--prefix', distdir + '/',
+    '-o', join_paths(meson.build_root(), '@OUTPUT@'),
+    pg_git_revision],
+  output: distdir + '.tar.gz',
+)
+
+if bzip2.found()
+  tar_bz2 = custom_target('tar.bz2',
+    build_always_stale: true,
+    command: [git, '-C', '@SOURCE_ROOT@',
+      '-c', 'core.autocrlf=false',
+      # tell 'git archive' how to produce .tar.bz2, using the bzip2 binary
+      # found above as the compression filter
+      '-c', 'tar.tar.bz2.command="@0@" -c'.format(bzip2.path()),
+      'archive',
+      '--format', 'tar.bz2',
+      '--prefix', distdir + '/',
+      '-o', join_paths(meson.build_root(), '@OUTPUT@'),
+      pg_git_revision],
+    output: distdir + '.tar.bz2',
+  )
+else
+  # Without bzip2, substitute a command that always fails, so requesting the
+  # .tar.bz2 archive produces a clear build error rather than a silently
+  # missing file.
+  tar_bz2 = custom_target('tar.bz2',
+    command: [perl, '-e', 'exit 1'],
+    output: distdir + '.tar.bz2',
+  )
+endif
+
+# Convenience target producing both archive formats.
+alias_target('pgdist', [tar_gz, tar_bz2])
+
+# Make the standard "dist" command fail, to prevent accidental use.
+# But not if we are in a subproject, in case the parent project wants to
+# create a dist using the standard Meson command.
+if not meson.is_subproject()
+  # We can only pass the identifier perl here when we depend on >= 0.55
+  if meson.version().version_compare('>=0.55')
+    meson.add_dist_script(perl, '-e', 'exit 1')
+  endif
+endif
+
###############################################################
{
'bison': '@0@ @1@'.format(bison.full_path(), bison_version),
'dtrace': dtrace,
+ 'flex': '@0@ @1@'.format(flex.full_path(), flex_version),
},
section: 'Programs',
)
{
'bonjour': bonjour,
'bsd_auth': bsd_auth,
+ 'docs': docs_dep,
+ 'docs_pdf': docs_pdf_dep,
'gss': gssapi,
'icu': icu,
'ldap': ldap,