* remove gamin
This commit is contained in:
parent 6b22259109
commit b2883740fc

ceph/PKGBUILD | 1128 lines (file diff suppressed because it is too large)
@@ -1,15 +0,0 @@
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 874eabfaa1..e337f4cf8e 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -601,10 +601,6 @@ install(PROGRAMS
  ${CMAKE_SOURCE_DIR}/src/ceph-run
  ${CMAKE_SOURCE_DIR}/src/ceph-clsinfo
  DESTINATION bin)
-install(PROGRAMS
-  ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/init-ceph
-  DESTINATION ${CMAKE_INSTALL_SYSCONFDIR}/init.d
-  RENAME ceph)

install(FILES
  ${CMAKE_SOURCE_DIR}/share/id_rsa_drop.ceph.com
@@ -1,26 +0,0 @@
diff --git a/src/test/librados/CMakeLists.txt b/src/test/librados/CMakeLists.txt
index e9338bbd464..f869ca4e821 100644
--- a/src/test/librados/CMakeLists.txt
+++ b/src/test/librados/CMakeLists.txt
@@ -58,9 +58,9 @@ add_executable(ceph_test_rados_api_aio_pp
target_link_libraries(ceph_test_rados_api_aio_pp
  librados ${UNITTEST_LIBS} radostest-cxx)

-add_executable(ceph_test_rados_api_asio asio.cc)
-target_link_libraries(ceph_test_rados_api_asio global
-  librados ${UNITTEST_LIBS} Boost::coroutine Boost::context)
+#add_executable(ceph_test_rados_api_asio asio.cc)
+#target_link_libraries(ceph_test_rados_api_asio global
+#  librados ${UNITTEST_LIBS} Boost::coroutine Boost::context)

add_executable(ceph_test_rados_api_list
  list.cc
@@ -144,7 +144,7 @@ target_link_libraries(ceph_test_rados_api_snapshots_pp
install(TARGETS
  ceph_test_rados_api_aio
  ceph_test_rados_api_aio_pp
-  ceph_test_rados_api_asio
+  #ceph_test_rados_api_asio
  ceph_test_rados_api_c_read_operations
  ceph_test_rados_api_c_write_operations
  ceph_test_rados_api_cmd
@@ -1,72 +0,0 @@
diff --git a/src/pybind/cephfs/setup.py b/src/pybind/cephfs/setup.py
index f6c2025f75d..8d788038e8f 100755
--- a/src/pybind/cephfs/setup.py
+++ b/src/pybind/cephfs/setup.py
@@ -169,13 +169,6 @@ except ImportError:
else:
    source = "cephfs.pyx"

-# Disable cythonification if we're not really building anything
-if (len(sys.argv) >= 2 and
-        any(i in sys.argv[1:] for i in ('--help', 'clean', 'egg_info', '--version')
-            )):
-    def cythonize(x, **kwargs):
-        return x
-
setup(
    name='cephfs',
    version=__version__,
diff --git a/src/pybind/rados/setup.py b/src/pybind/rados/setup.py
index 62b54d26b6c..5b1a9ade949 100755
--- a/src/pybind/rados/setup.py
+++ b/src/pybind/rados/setup.py
@@ -160,13 +160,6 @@ except ImportError:
else:
    source = "rados.pyx"

-# Disable cythonification if we're not really building anything
-if (len(sys.argv) >= 2 and
-        any(i in sys.argv[1:] for i in ('--help', 'clean', 'egg_info', '--version')
-            )):
-    def cythonize(x, **kwargs):
-        return x
-
setup(
    name='rados',
    version=__version__,
diff --git a/src/pybind/rbd/setup.py b/src/pybind/rbd/setup.py
index 1f20c3ed42f..93e8a97eb15 100755
--- a/src/pybind/rbd/setup.py
+++ b/src/pybind/rbd/setup.py
@@ -167,13 +167,6 @@ except ImportError:
else:
    source = "rbd.pyx"

-# Disable cythonification if we're not really building anything
-if (len(sys.argv) >= 2 and
-        any(i in sys.argv[1:] for i in ('--help', 'clean', 'egg_info', '--version')
-            )):
-    def cythonize(x, **kwargs):
-        return x
-
setup(
    name='rbd',
    version=__version__,
diff --git a/src/pybind/rgw/setup.py b/src/pybind/rgw/setup.py
index ed45399d394..92c7e650173 100755
--- a/src/pybind/rgw/setup.py
+++ b/src/pybind/rgw/setup.py
@@ -168,13 +168,6 @@ except ImportError:
else:
    source = "rgw.pyx"

-# Disable cythonification if we're not really building anything
-if (len(sys.argv) >= 2 and
-        any(i in sys.argv[1:] for i in ('--help', 'clean', 'egg_info', '--version')
-            )):
-    def cythonize(x, **kwargs):
-        return x
-
setup(
    name='rgw',
    version=__version__,
File diff suppressed because it is too large
@@ -1,12 +0,0 @@
diff --git a/src/pybind/mgr/tox.ini b/src/pybind/mgr/tox.ini
index 3426ae28735..d7756445a6b 100644
--- a/src/pybind/mgr/tox.ini
+++ b/src/pybind/mgr/tox.ini
@@ -184,7 +184,6 @@ modules =
commands =
  flake8 --config=tox.ini {posargs} \
    {posargs:{[testenv:flake8]modules}}
-  bash -c 'test $(git ls-files cephadm | grep ".py$" | grep -v tests | xargs grep "docker.io" | wc -l) == 13'

[testenv:jinjalint]
basepython = python3
@@ -1,12 +0,0 @@
diff --git a/src/pybind/mgr/tox.ini b/src/pybind/mgr/tox.ini
index 85e7ae3db11..9a8d009b973 100644
--- a/src/pybind/mgr/tox.ini
+++ b/src/pybind/mgr/tox.ini
@@ -88,7 +88,6 @@ commands =
  -m crash \
  -m dashboard \
  -m devicehealth \
-  -m diskprediction_local \
  -m hello \
  -m influx \
  -m iostat \
@@ -1,71 +0,0 @@
diff --git a/src/include/rangeset.h b/src/include/rangeset.h
index e7e3d047c72..f19af0b61e4 100644
--- a/src/include/rangeset.h
+++ b/src/include/rangeset.h
@@ -55,9 +55,14 @@ struct _rangeset_base {


template <class T>
-class rangeset_iterator :
-  public std::iterator<std::input_iterator_tag, T>
+class rangeset_iterator
{
+  using iterator_category = std::input_iterator_tag;
+  using value_type = T;
+  using difference_type = std::ptrdiff_t;
+  using pointer = T*;
+  using reference = T&;
+
  //typedef typename map<T,T>::iterator mapit;

  map<T,T> ranges;
diff --git a/src/msg/async/dpdk/circular_buffer.h b/src/msg/async/dpdk/circular_buffer.h
index 2c92c120444..bf5d422dac6 100644
--- a/src/msg/async/dpdk/circular_buffer.h
+++ b/src/msg/async/dpdk/circular_buffer.h
@@ -89,8 +89,12 @@ class circular_buffer {
  size_t mask(size_t idx) const;

  template<typename CB, typename ValueType>
-  struct cbiterator : std::iterator<std::random_access_iterator_tag, ValueType> {
-    typedef std::iterator<std::random_access_iterator_tag, ValueType> super_t;
+  struct cbiterator {
+    using iterator_category = std::random_access_iterator_tag;
+    using value_type = ValueType;
+    using difference_type = std::ptrdiff_t;
+    using pointer = ValueType*;
+    using reference = ValueType&;

    ValueType& operator*() const { return cb->_impl.storage[cb->mask(idx)]; }
    ValueType* operator->() const { return &cb->_impl.storage[cb->mask(idx)]; }
@@ -116,17 +120,17 @@ class circular_buffer {
      idx--;
      return v;
    }
-    cbiterator<CB, ValueType> operator+(typename super_t::difference_type n) const {
+    cbiterator<CB, ValueType> operator+(difference_type n) const {
      return cbiterator<CB, ValueType>(cb, idx + n);
    }
-    cbiterator<CB, ValueType> operator-(typename super_t::difference_type n) const {
+    cbiterator<CB, ValueType> operator-(difference_type n) const {
      return cbiterator<CB, ValueType>(cb, idx - n);
    }
-    cbiterator<CB, ValueType>& operator+=(typename super_t::difference_type n) {
+    cbiterator<CB, ValueType>& operator+=(difference_type n) {
      idx += n;
      return *this;
    }
-    cbiterator<CB, ValueType>& operator-=(typename super_t::difference_type n) {
+    cbiterator<CB, ValueType>& operator-=(difference_type n) {
      idx -= n;
      return *this;
    }
@@ -148,7 +152,7 @@ class circular_buffer {
    bool operator<=(const cbiterator<CB, ValueType>& rhs) const {
      return idx <= rhs.idx;
    }
-    typename super_t::difference_type operator-(const cbiterator<CB, ValueType>& rhs) const {
+    difference_type operator-(const cbiterator<CB, ValueType>& rhs) const {
      return idx - rhs.idx;
    }
  private:
@@ -1,11 +0,0 @@
diff --git a/src/logrotate.conf b/src/logrotate.conf
index a9a452dd656..7949bebf49b 100644
--- a/src/logrotate.conf
+++ b/src/logrotate.conf
@@ -8,5 +8,6 @@
    endscript
    missingok
    notifempty
+   ignoreduplicates
    su root ceph
}
@@ -1,50 +0,0 @@
diff --git a/src/rgw/rgw_asio_client.cc b/src/rgw/rgw_asio_client.cc
index 82d5d43f8b8..ad6723a6436 100644
--- a/src/rgw/rgw_asio_client.cc
+++ b/src/rgw/rgw_asio_client.cc
@@ -39,11 +39,11 @@ int ClientIO::init_env(CephContext *cct)
    const auto& value = header->value();

    if (field == beast::http::field::content_length) {
-      env.set("CONTENT_LENGTH", value.to_string());
+      env.set("CONTENT_LENGTH", value);
      continue;
    }
    if (field == beast::http::field::content_type) {
-      env.set("CONTENT_TYPE", value.to_string());
+      env.set("CONTENT_TYPE", value);
      continue;
    }

@@ -62,26 +62,26 @@ int ClientIO::init_env(CephContext *cct)
    }
    *dest = '\0';

-    env.set(buf, value.to_string());
+    env.set(buf, value);
  }

  int major = request.version() / 10;
  int minor = request.version() % 10;
  env.set("HTTP_VERSION", std::to_string(major) + '.' + std::to_string(minor));

-  env.set("REQUEST_METHOD", request.method_string().to_string());
+  env.set("REQUEST_METHOD", request.method_string());

  // split uri from query
  auto uri = request.target();
  auto pos = uri.find('?');
  if (pos != uri.npos) {
    auto query = uri.substr(pos + 1);
-    env.set("QUERY_STRING", query.to_string());
+    env.set("QUERY_STRING", query);
    uri = uri.substr(0, pos);
  }
-  env.set("SCRIPT_URI", uri.to_string());
+  env.set("SCRIPT_URI", uri);

-  env.set("REQUEST_URI", request.target().to_string());
+  env.set("REQUEST_URI", request.target());

  char port_buf[16];
  snprintf(port_buf, sizeof(port_buf), "%d", local_endpoint.port());
@@ -1,48 +0,0 @@
diff --git a/src/pybind/rbd/c_rbd.pxd b/src/pybind/rbd/c_rbd.pxd
index 275984209f7..d5aed200b74 100644
--- a/src/pybind/rbd/c_rbd.pxd
+++ b/src/pybind/rbd/c_rbd.pxd
@@ -8,7 +8,7 @@ cdef extern from "rados/librados.h":
    _LIBRADOS_SNAP_HEAD "LIBRADOS_SNAP_HEAD"

cdef extern from "rbd/librbd.h":
-    ctypedef int (*librbd_progress_fn_t)(uint64_t offset, uint64_t total, void* ptr)
+    ctypedef int (*librbd_progress_fn_t)(uint64_t offset, uint64_t total, void* ptr) noexcept

cdef extern from "rbd/librbd.h" nogil:
    enum:
@@ -282,7 +282,7 @@ cdef extern from "rbd/librbd.h" nogil:

    ctypedef void* rbd_encryption_options_t

-    ctypedef void (*rbd_callback_t)(rbd_completion_t cb, void *arg)
+    ctypedef void (*rbd_callback_t)(rbd_completion_t cb, void *arg) noexcept

    void rbd_version(int *major, int *minor, int *extra)

diff --git a/src/pybind/rbd/rbd.pyx b/src/pybind/rbd/rbd.pyx
index 16014f1409c..02b8a01901d 100644
--- a/src/pybind/rbd/rbd.pyx
+++ b/src/pybind/rbd/rbd.pyx
@@ -370,10 +370,10 @@ ELSE:
cdef rados_ioctx_t convert_ioctx(rados.Ioctx ioctx) except? NULL:
    return <rados_ioctx_t>ioctx.io

-cdef int progress_callback(uint64_t offset, uint64_t total, void* ptr) with gil:
+cdef int progress_callback(uint64_t offset, uint64_t total, void* ptr) noexcept with gil:
    return (<object>ptr)(offset, total)

-cdef int no_op_progress_callback(uint64_t offset, uint64_t total, void* ptr):
+cdef int no_op_progress_callback(uint64_t offset, uint64_t total, void* ptr) noexcept:
    return 0

def cstr(val, name, encoding="utf-8", opt=False):
@@ -425,7 +425,7 @@ RBD_MIRROR_PEER_ATTRIBUTE_NAME_KEY = decode_cstr(_RBD_MIRROR_PEER_ATTRIBUTE_NAME

cdef class Completion

-cdef void __aio_complete_cb(rbd_completion_t completion, void *args) with gil:
+cdef void __aio_complete_cb(rbd_completion_t completion, void *args) noexcept with gil:
    """
    Callback to oncomplete() for asynchronous operations
    """
@@ -1,10 +0,0 @@
diff --git a/src/pybind/mgr/dashboard/constraints.txt b/src/pybind/mgr/dashboard/constraints.txt
index 55f81c92dec..c3620497f42 100644
--- a/src/pybind/mgr/dashboard/constraints.txt
+++ b/src/pybind/mgr/dashboard/constraints.txt
@@ -1,4 +1,4 @@
-CherryPy~=13.1
+CherryPy~=18.7
more-itertools~=8.14
PyJWT~=2.0
bcrypt~=3.1
@@ -1,13 +0,0 @@
diff --git a/src/pybind/mgr/dashboard/frontend/CMakeLists.txt b/src/pybind/mgr/dashboard/frontend/CMakeLists.txt
index 4fd2130b93a..7df5de57c66 100644
--- a/src/pybind/mgr/dashboard/frontend/CMakeLists.txt
+++ b/src/pybind/mgr/dashboard/frontend/CMakeLists.txt
@@ -63,7 +63,7 @@ else(WITH_SYSTEM_NPM)
    OUTPUT "${mgr-dashboard-nodeenv-dir}/bin/npm"
    COMMAND ${CMAKE_SOURCE_DIR}/src/tools/setup-virtualenv.sh --python=${MGR_PYTHON_EXECUTABLE} ${mgr-dashboard-nodeenv-dir}
    COMMAND ${mgr-dashboard-nodeenv-dir}/bin/pip install nodeenv
-    COMMAND ${mgr-dashboard-nodeenv-dir}/bin/nodeenv --verbose ${node_mirror_opt} -p --node=14.15.1
+    COMMAND ${mgr-dashboard-nodeenv-dir}/bin/nodeenv --verbose ${node_mirror_opt} -p --node=16.15.0
    COMMAND mkdir ${mgr-dashboard-nodeenv-dir}/.npm
    WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
    COMMENT "dashboard nodeenv is being installed")
@@ -1,10 +0,0 @@
diff --git a/src/pybind/mgr/dashboard/requirements-lint.txt b/src/pybind/mgr/dashboard/requirements-lint.txt
index 1b15f46817c..89a6496098e 100644
--- a/src/pybind/mgr/dashboard/requirements-lint.txt
+++ b/src/pybind/mgr/dashboard/requirements-lint.txt
@@ -1,4 +1,4 @@
-pylint==2.6.0
+pylint==2.17.4
flake8==3.9.0
flake8-colors==0.1.6
#TODO: Fix docstring issues: https://tracker.ceph.com/issues/41224
@@ -1,577 +0,0 @@
From a5509f93a7b02e31950d1aba0625dd9996c9608e Mon Sep 17 00:00:00 2001
From: Pere Diaz Bou <pere-altea@hotmail.com>
Date: Tue, 25 Jul 2023 17:27:14 +0200
Subject: [PATCH 1/2] os/bluestore: test log runway expansion error

Signed-off-by: Pere Diaz Bou <pere-altea@hotmail.com>
---
 src/test/objectstore/test_bluefs.cc | 153 ++++++++++++++++++++++++++++
 1 file changed, 153 insertions(+)

diff --git a/src/test/objectstore/test_bluefs.cc b/src/test/objectstore/test_bluefs.cc
index 4f77d8597ae1d..75496a89d2c39 100644
--- a/src/test/objectstore/test_bluefs.cc
+++ b/src/test/objectstore/test_bluefs.cc
@@ -1401,6 +1401,159 @@ TEST(BlueFS, test_concurrent_dir_link_and_compact_log_56210) {
  }
}

+TEST(BlueFS, test_log_runway) {
+  uint64_t max_log_runway = 65536;
+  ConfSaver conf(g_ceph_context->_conf);
+  conf.SetVal("bluefs_compact_log_sync", "false");
+  conf.SetVal("bluefs_min_log_runway", "32768");
+  conf.SetVal("bluefs_max_log_runway", std::to_string(max_log_runway).c_str());
+  conf.ApplyChanges();
+
+  uint64_t size = 1048576 * 128;
+  TempBdev bdev{size};
+  BlueFS fs(g_ceph_context);
+  ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_DB, bdev.path, false, 1048576));
+  uuid_d fsid;
+  ASSERT_EQ(0, fs.mkfs(fsid, { BlueFS::BDEV_DB, false, false }));
+  ASSERT_EQ(0, fs.mount());
+  ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, false, false }));
+  // longer transaction than current runway
+  std::string longdir(max_log_runway, 'a');
+  fs.mkdir(longdir);
+  {
+    BlueFS::FileWriter *h;
+    ASSERT_EQ(0, fs.mkdir("dir"));
+    ASSERT_EQ(0, fs.open_for_write("dir", "file", &h, false));
+    h->append("foo", 3);
+    h->append("bar", 3);
+    h->append("baz", 3);
+    fs.fsync(h);
+    fs.close_writer(h);
+  }
+  fs.umount(true);
+  fs.mount();
+
+  std::vector<std::string> ls;
+  fs.readdir("dir", &ls);
+  ASSERT_EQ(ls.front(), "file");
+  uint64_t file_size = 0;
+  utime_t mtime;
+  fs.stat("dir", "file", &file_size, &mtime);
+  ASSERT_EQ(file_size, 9);
+}
+
+TEST(BlueFS, test_log_runway_2) {
+  uint64_t max_log_runway = 65536;
+  ConfSaver conf(g_ceph_context->_conf);
+  conf.SetVal("bluefs_compact_log_sync", "false");
+  conf.SetVal("bluefs_min_log_runway", "32768");
+  conf.SetVal("bluefs_max_log_runway", std::to_string(max_log_runway).c_str());
+  conf.ApplyChanges();
+
+  uint64_t size = 1048576 * 128;
+  TempBdev bdev{size};
+  BlueFS fs(g_ceph_context);
+  ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_DB, bdev.path, false, 1048576));
+  uuid_d fsid;
+  ASSERT_EQ(0, fs.mkfs(fsid, { BlueFS::BDEV_DB, false, false }));
+  ASSERT_EQ(0, fs.mount());
+  ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, false, false }));
+  // longer transaction than current runway
+  std::string longdir(max_log_runway * 2, 'a');
+  std::string longfile(max_log_runway * 2, 'b');
+  {
+    BlueFS::FileWriter *h;
+    ASSERT_EQ(0, fs.mkdir(longdir));
+    ASSERT_EQ(0, fs.open_for_write(longdir, longfile, &h, false));
+    h->append("canary", 6);
+    fs.fsync(h);
+    fs.close_writer(h);
+    fs.sync_metadata(true);
+  }
+  {
+    BlueFS::FileWriter *h;
+    ASSERT_EQ(0, fs.mkdir("dir"));
+    ASSERT_EQ(0, fs.open_for_write("dir", "file", &h, false));
+    h->append("foo", 3);
+    h->append("bar", 3);
+    h->append("baz", 3);
+    fs.fsync(h);
+    fs.close_writer(h);
+  }
+  fs.umount(true);
+  fs.mount();
+
+  std::vector<std::string> ls;
+  fs.readdir("dir", &ls);
+  ASSERT_EQ(ls.front(), "file");
+  uint64_t file_size = 0;
+  utime_t mtime;
+  fs.stat("dir", "file", &file_size, &mtime);
+  ASSERT_EQ(file_size, 9);
+  fs.stat(longdir, longfile, &file_size, &mtime);
+  ASSERT_EQ(file_size, 6);
+}
+
+TEST(BlueFS, test_log_runway_3) {
+  uint64_t max_log_runway = 65536;
+  ConfSaver conf(g_ceph_context->_conf);
+  conf.SetVal("bluefs_alloc_size", "4096");
+  conf.SetVal("bluefs_shared_alloc_size", "4096");
+  conf.SetVal("bluefs_compact_log_sync", "false");
+  conf.SetVal("bluefs_min_log_runway", "32768");
+  conf.SetVal("bluefs_max_log_runway", std::to_string(max_log_runway).c_str());
+  conf.ApplyChanges();
+
+  uint64_t size = 1048576 * 128;
+  TempBdev bdev{size};
+  BlueFS fs(g_ceph_context);
+  ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_DB, bdev.path, false, 1048576));
+  uuid_d fsid;
+  ASSERT_EQ(0, fs.mkfs(fsid, { BlueFS::BDEV_DB, false, false }));
+  ASSERT_EQ(0, fs.mount());
+  ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, false, false }));
+  // longer transaction than current runway
+  for (size_t m = 0; m < 40; m++) {
+    std::string longdir(max_log_runway + m, 'A' + m);
+    std::string longfile(max_log_runway + m, 'A' + m);
+    BlueFS::FileWriter *h;
+    ASSERT_EQ(0, fs.mkdir(longdir));
+    ASSERT_EQ(0, fs.open_for_write(longdir, longfile, &h, false));
+    h->append("canary", 6);
+    fs.fsync(h);
+    fs.close_writer(h);
+    fs.sync_metadata(true);
+  }
+  {
+    BlueFS::FileWriter *h;
+    ASSERT_EQ(0, fs.mkdir("dir"));
+    ASSERT_EQ(0, fs.open_for_write("dir", "file", &h, false));
+    h->append("foo", 3);
+    h->append("bar", 3);
+    h->append("baz", 3);
+    fs.fsync(h);
+    fs.close_writer(h);
+  }
+  fs.umount(true);
+  fs.mount();
+
+  std::vector<std::string> ls;
+  fs.readdir("dir", &ls);
+  ASSERT_EQ(ls.front(), "file");
+  uint64_t file_size = 0;
+  utime_t mtime;
+  fs.stat("dir", "file", &file_size, &mtime);
+  ASSERT_EQ(file_size, 9);
+  for (size_t m = 0; m < 40; m++) {
+    uint64_t file_size = 0;
+    utime_t mtime;
+    std::string longdir(max_log_runway + m, 'A' + m);
+    std::string longfile(max_log_runway + m, 'A' + m);
+    fs.stat(longdir, longfile, &file_size, &mtime);
+    ASSERT_EQ(file_size, 6);
+  }
+}
+
int main(int argc, char **argv) {
  auto args = argv_to_vec(argc, argv);
  map<string,string> defaults = {

From e97aa857c241a9694871de3d3a4079c4d9a120d3 Mon Sep 17 00:00:00 2001
From: Pere Diaz Bou <pere-altea@hotmail.com>
Date: Tue, 25 Jul 2023 17:28:14 +0200
Subject: [PATCH 2/2] os/bluestore: fix bluefs log runway enospc

With these changes, every call to log compaction will try to expand its
runway in case of insufficient log space. Async compaction ignores the
`log_forbidden_to_expand` atomic since we know the expansion shouldn't
be harmful there. In any other case, expansion of the log waits until
compaction is completed.

In order to ensure op_file_update_inc fits on disk, we increase the size
of the log as previously done in _maybe_extend_log. This means we also
bring back _maybe_extend_log, with a different usage.

_maybe_extend_log increases the size of the log if the runway is less
than the min runway, or if the current transaction is too big to fit.

Fixes: https://tracker.ceph.com/issues/58759
Signed-off-by: Pere Diaz Bou <pere-altea@hotmail.com>
---
 src/os/bluestore/BlueFS.cc          | 157 ++++++++++++++--------------
 src/os/bluestore/BlueFS.h           |   7 +-
 src/os/bluestore/bluefs_types.cc    |  19 ++++
 src/os/bluestore/bluefs_types.h     |   2 +
 src/test/objectstore/test_bluefs.cc |   9 +-
 5 files changed, 110 insertions(+), 84 deletions(-)

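[Editorial note, not part of the patch: a minimal sketch of the extension rule
described in the commit message above, using hypothetical stand-in types; the
authoritative logic is _maybe_extend_log() in the diff below. Text placed here,
between the diffstat and the first diff header, is ignored by git am/apply.]

    // Sketch only: RunwayConf stands in for the real cct->_conf handle.
    #include <cstdint>

    struct RunwayConf {
      uint64_t bluefs_min_log_runway;  // e.g. 32768
      uint64_t bluefs_max_log_runway;  // e.g. 65536
    };

    // Returns how many bytes to allocate for the log, or 0 if no extension
    // is needed. Two triggers, mirroring the patch: the pending transaction
    // (plus the minimum runway) no longer fits, or the remaining runway has
    // dropped below the configured minimum.
    uint64_t extension_needed(const RunwayConf& c,
                              uint64_t runway,              // allocated - write_pos
                              uint64_t expected_txn_size) { // bound_encode() estimate
      if (expected_txn_size + c.bluefs_min_log_runway > runway)
        return expected_txn_size + c.bluefs_max_log_runway;
      if (runway < c.bluefs_min_log_runway)
        return c.bluefs_max_log_runway;
      return 0;
    }
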
diff --git a/src/os/bluestore/BlueFS.cc b/src/os/bluestore/BlueFS.cc
index 8454ddaf8078c..19953b130d62a 100644
--- a/src/os/bluestore/BlueFS.cc
+++ b/src/os/bluestore/BlueFS.cc
@@ -20,15 +20,12 @@ using TOPNSPC::common::cmd_getval;

using std::byte;
using std::list;
-using std::make_pair;
using std::map;
using std::ostream;
-using std::pair;
using std::set;
using std::string;
using std::to_string;
using std::vector;
-using std::chrono::duration;
using std::chrono::seconds;

using ceph::bufferlist;
@@ -2745,14 +2742,19 @@ void BlueFS::_compact_log_async_LD_LNF_D() //also locks FW for new_writer
  // Part 0.
  // Lock the log and forbid its expansion and other compactions

+  // lock log's run-time structures for a while
+  log.lock.lock();
+
+  // Extend log in case of having a big transaction waiting before starting compaction.
+  _maybe_extend_log();
+
  // only one compaction allowed at one time
  bool old_is_comp = std::atomic_exchange(&log_is_compacting, true);
  if (old_is_comp) {
    dout(10) << __func__ << " ongoing" <<dendl;
+    log.lock.unlock();
    return;
  }
-  // lock log's run-time structures for a while
-  log.lock.lock();
  auto t0 = mono_clock::now();

  // Part 1.
@@ -2763,7 +2765,7 @@ void BlueFS::_compact_log_async_LD_LNF_D() //also locks FW for new_writer
  // During that, no one else can write to log, otherwise we risk jumping backwards.
  // We need to sync log, because we are injecting discontinuity, and writer is not prepared for that.

-  //signal _maybe_extend_log that expansion of log is temporary inacceptable
+  //signal _extend_log that expansion of log is temporary inacceptable
  bool old_forbidden = atomic_exchange(&log_forbidden_to_expand, true);
  ceph_assert(old_forbidden == false);

@@ -2779,9 +2781,9 @@ void BlueFS::_compact_log_async_LD_LNF_D() //also locks FW for new_writer

  // 1.1 allocate new log extents and store them at fnode_tail
  File *log_file = log.writer->file.get();
+
  old_log_jump_to = log_file->fnode.get_allocated();
  bluefs_fnode_t fnode_tail;
-  uint64_t runway = log_file->fnode.get_allocated() - log.writer->get_effective_write_pos();
  dout(10) << __func__ << " old_log_jump_to 0x" << std::hex << old_log_jump_to
           << " need 0x" << cct->_conf->bluefs_max_log_runway << std::dec << dendl;
  int r = _allocate(vselector->select_prefer_bdev(log_file->vselector_hint),
@@ -2809,7 +2811,7 @@ void BlueFS::_compact_log_async_LD_LNF_D() //also locks FW for new_writer
  // TODO - think - if _flush_and_sync_log_jump will not add dirty files nor release pending allocations
  // then flush_bdev() will not be necessary
  _flush_bdev();
-  _flush_and_sync_log_jump_D(old_log_jump_to, runway);
+  _flush_and_sync_log_jump_D(old_log_jump_to);

  //
  // Part 2.
@@ -3054,54 +3056,68 @@ void BlueFS::_consume_dirty(uint64_t seq)
  }
}

-// Extends log if its free space is smaller then bluefs_min_log_runway.
-// Returns space available *BEFORE* adding new space. Signed for additional <0 detection.
-int64_t BlueFS::_maybe_extend_log()
-{
+int64_t BlueFS::_maybe_extend_log() {
+  uint64_t runway = log.writer->file->fnode.get_allocated() - log.writer->get_effective_write_pos();
+  // increasing the size of the log involves adding a OP_FILE_UPDATE_INC which its size will
+  // increase with respect the number of extents. bluefs_min_log_runway should cover the max size
+  // a log can get.
+  // inject new allocation in case log is too big
+  size_t expected_log_size = 0;
+  log.t.bound_encode(expected_log_size);
+  if (expected_log_size + cct->_conf->bluefs_min_log_runway > runway) {
+    _extend_log(expected_log_size + cct->_conf->bluefs_max_log_runway);
+  } else if (runway < cct->_conf->bluefs_min_log_runway) {
+    _extend_log(cct->_conf->bluefs_max_log_runway);
+  }
+  runway = log.writer->file->fnode.get_allocated() - log.writer->get_effective_write_pos();
+  return runway;
+}
+
+void BlueFS::_extend_log(uint64_t amount) {
  ceph_assert(ceph_mutex_is_locked(log.lock));
-  // allocate some more space (before we run out)?
-  // BTW: this triggers `flush()` in the `page_aligned_appender` of `log.writer`.
-  int64_t runway = log.writer->file->fnode.get_allocated() -
-    log.writer->get_effective_write_pos();
-  if (runway < (int64_t)cct->_conf->bluefs_min_log_runway) {
-    dout(10) << __func__ << " allocating more log runway (0x"
-             << std::hex << runway << std::dec << " remaining)" << dendl;
-    /*
-     * Usually, when we are low on space in log, we just allocate new extent,
-     * put update op(log) to log and we are fine.
-     * Problem - it interferes with log compaction:
-     * New log produced in compaction will include - as last op - jump into some offset (anchor) of current log.
-     * It is assumed that log region (anchor - end) will contain all changes made by bluefs since
-     * full state capture into new log.
-     * Putting log update into (anchor - end) region is illegal, because any update there must be compatible with
-     * both logs, but old log is different then new log.
-     *
-     * Possible solutions:
-     * - stall extending log until we finish compacting and switch log (CURRENT)
-     * - re-run compaction with more runway for old log
-     * - add OP_FILE_ADDEXT that adds extent; will be compatible with both logs
-     */
-    if (log_forbidden_to_expand.load() == true) {
-      return -EWOULDBLOCK;
-    }
-    vselector->sub_usage(log.writer->file->vselector_hint, log.writer->file->fnode);
-    int r = _allocate(
+  std::unique_lock<ceph::mutex> ll(log.lock, std::adopt_lock);
+  while (log_forbidden_to_expand.load() == true) {
+    log_cond.wait(ll);
+  }
+  ll.release();
+  uint64_t allocated_before_extension = log.writer->file->fnode.get_allocated();
+  vselector->sub_usage(log.writer->file->vselector_hint, log.writer->file->fnode);
+  amount = round_up_to(amount, super.block_size);
+  int r = _allocate(
      vselector->select_prefer_bdev(log.writer->file->vselector_hint),
-      cct->_conf->bluefs_max_log_runway,
+      amount,
      0,
      &log.writer->file->fnode);
-    ceph_assert(r == 0);
-    vselector->add_usage(log.writer->file->vselector_hint, log.writer->file->fnode);
-    log.t.op_file_update_inc(log.writer->file->fnode);
+  ceph_assert(r == 0);
+  dout(10) << "extended log by 0x" << std::hex << amount << " bytes " << dendl;
+  vselector->add_usage(log.writer->file->vselector_hint, log.writer->file->fnode);
+
+  bluefs_transaction_t log_extend_transaction;
+  log_extend_transaction.seq = log.t.seq;
+  log_extend_transaction.uuid = log.t.uuid;
+  log_extend_transaction.op_file_update_inc(log.writer->file->fnode);
+
+  bufferlist bl;
+  bl.reserve(super.block_size);
+  encode(log_extend_transaction, bl);
+  _pad_bl(bl, super.block_size);
+  log.writer->append(bl);
+  ceph_assert(allocated_before_extension >= log.writer->get_effective_write_pos());
+  log.t.seq = log.seq_live;
+
+  // before sync_core we advance the seq
+  {
+    std::unique_lock<ceph::mutex> l(dirty.lock);
+    _log_advance_seq();
  }
-  return runway;
}

-void BlueFS::_flush_and_sync_log_core(int64_t runway)
+void BlueFS::_flush_and_sync_log_core()
{
  ceph_assert(ceph_mutex_is_locked(log.lock));
  dout(10) << __func__ << " " << log.t << dendl;

+
  bufferlist bl;
  bl.reserve(super.block_size);
  encode(log.t, bl);
@@ -3113,10 +3129,11 @@ void BlueFS::_flush_and_sync_log_core(int64_t runway)
  logger->inc(l_bluefs_log_write_count, 1);
  logger->inc(l_bluefs_logged_bytes, bl.length());

-  if (true) {
-    ceph_assert(bl.length() <= runway); // if we write this, we will have an unrecoverable data loss
-                                        // transaction will not fit extents before growth -> data loss on _replay
-  }
+  uint64_t runway = log.writer->file->fnode.get_allocated() - log.writer->get_effective_write_pos();
+  // ensure runway is big enough, this should be taken care of by _maybe_extend_log,
+  // but let's keep this here just in case.
+  ceph_assert(bl.length() <= runway);
+

  log.writer->append(bl);

@@ -3185,31 +3202,15 @@ void BlueFS::_release_pending_allocations(vector<interval_set<uint64_t>>& to_rel

int BlueFS::_flush_and_sync_log_LD(uint64_t want_seq)
{
-  int64_t available_runway;
-  do {
-    log.lock.lock();
-    dirty.lock.lock();
-    if (want_seq && want_seq <= dirty.seq_stable) {
-      dout(10) << __func__ << " want_seq " << want_seq << " <= seq_stable "
-               << dirty.seq_stable << ", done" << dendl;
-      dirty.lock.unlock();
-      log.lock.unlock();
-      return 0;
-    }
-
-    available_runway = _maybe_extend_log();
-    if (available_runway == -EWOULDBLOCK) {
-      // we are in need of adding runway, but we are during log-switch from compaction
-      dirty.lock.unlock();
-      //instead log.lock.unlock() do move ownership
-      std::unique_lock<ceph::mutex> ll(log.lock, std::adopt_lock);
-      while (log_forbidden_to_expand.load()) {
-        log_cond.wait(ll);
-      }
-    } else {
-      ceph_assert(available_runway >= 0);
-    }
-  } while (available_runway < 0);
+  log.lock.lock();
+  dirty.lock.lock();
+  if (want_seq && want_seq <= dirty.seq_stable) {
+    dout(10) << __func__ << " want_seq " << want_seq << " <= seq_stable "
+             << dirty.seq_stable << ", done" << dendl;
+    dirty.lock.unlock();
+    log.lock.unlock();
+    return 0;
+  }

  ceph_assert(want_seq == 0 || want_seq <= dirty.seq_live); // illegal to request seq that was not created yet
  uint64_t seq =_log_advance_seq();
@@ -3218,7 +3219,8 @@ int BlueFS::_flush_and_sync_log_LD(uint64_t want_seq)
  to_release.swap(dirty.pending_release);
  dirty.lock.unlock();

-  _flush_and_sync_log_core(available_runway);
+  _maybe_extend_log();
+  _flush_and_sync_log_core();
  _flush_bdev(log.writer);
  logger->set(l_bluefs_log_bytes, log.writer->file->fnode.size);
  //now log.lock is no longer needed
@@ -3232,8 +3234,7 @@ int BlueFS::_flush_and_sync_log_LD(uint64_t want_seq)
}

// Flushes log and immediately adjusts log_writer pos.
-int BlueFS::_flush_and_sync_log_jump_D(uint64_t jump_to,
-                                       int64_t available_runway)
+int BlueFS::_flush_and_sync_log_jump_D(uint64_t jump_to)
{
  ceph_assert(ceph_mutex_is_locked(log.lock));

@@ -3246,7 +3247,7 @@ int BlueFS::_flush_and_sync_log_jump_D(uint64_t jump_to,
  vector<interval_set<uint64_t>> to_release(dirty.pending_release.size());
  to_release.swap(dirty.pending_release);
  dirty.lock.unlock();
-  _flush_and_sync_log_core(available_runway);
+  _flush_and_sync_log_core();

  dout(10) << __func__ << " jumping log offset from 0x" << std::hex
           << log.writer->pos << " -> 0x" << jump_to << std::dec << dendl;
diff --git a/src/os/bluestore/BlueFS.h b/src/os/bluestore/BlueFS.h
index adfc8eb0a235b..4c89baea3a6c1 100644
--- a/src/os/bluestore/BlueFS.h
+++ b/src/os/bluestore/BlueFS.h
@@ -453,15 +453,14 @@ class BlueFS {
#endif

  int64_t _maybe_extend_log();
-  void _extend_log();
+  void _extend_log(uint64_t amount);
  uint64_t _log_advance_seq();
  void _consume_dirty(uint64_t seq);
  void _clear_dirty_set_stable_D(uint64_t seq_stable);
  void _release_pending_allocations(std::vector<interval_set<uint64_t>>& to_release);

-  void _flush_and_sync_log_core(int64_t available_runway);
-  int _flush_and_sync_log_jump_D(uint64_t jump_to,
-                                 int64_t available_runway);
+  void _flush_and_sync_log_core();
+  int _flush_and_sync_log_jump_D(uint64_t jump_to);
  int _flush_and_sync_log_LD(uint64_t want_seq = 0);

  uint64_t _estimate_transaction_size(bluefs_transaction_t* t);
diff --git a/src/os/bluestore/bluefs_types.cc b/src/os/bluestore/bluefs_types.cc
index c8d2ede7bed92..70c8a4fbf1c56 100644
--- a/src/os/bluestore/bluefs_types.cc
+++ b/src/os/bluestore/bluefs_types.cc
@@ -4,6 +4,7 @@
#include <algorithm>
#include "bluefs_types.h"
#include "common/Formatter.h"
+#include "include/denc.h"
#include "include/uuid.h"
#include "include/stringify.h"

@@ -218,6 +219,23 @@ std::ostream& operator<<(std::ostream& out, const bluefs_fnode_delta_t& delta)

// bluefs_transaction_t

+DENC_HELPERS
+void bluefs_transaction_t::bound_encode(size_t &s) const {
+  uint32_t crc = op_bl.crc32c(-1);
+  DENC_START(1, 1, s);
+  denc(uuid, s);
+  denc_varint(seq, s);
+  // not using bufferlist encode method, as it merely copies the bufferptr and not
+  // contents, meaning we're left with fragmented target bl
+  __u32 len = op_bl.length();
+  denc(len, s);
+  for (auto& it : op_bl.buffers()) {
+    s += it.length();
+  }
+  denc(crc, s);
+  DENC_FINISH(s);
+}
+
void bluefs_transaction_t::encode(bufferlist& bl) const
{
  uint32_t crc = op_bl.crc32c(-1);
@@ -282,3 +300,4 @@ ostream& operator<<(ostream& out, const bluefs_transaction_t& t)
             << " crc 0x" << t.op_bl.crc32c(-1)
             << std::dec << ")";
}
+
diff --git a/src/os/bluestore/bluefs_types.h b/src/os/bluestore/bluefs_types.h
index d5d8ee5a62826..b0ce7c5c9d38d 100644
--- a/src/os/bluestore/bluefs_types.h
+++ b/src/os/bluestore/bluefs_types.h
@@ -308,6 +308,7 @@ struct bluefs_transaction_t {
    encode(delta, op_bl);
    file.reset_delta();
  }
+
  void op_file_remove(uint64_t ino) {
    using ceph::encode;
    encode((__u8)OP_FILE_REMOVE, op_bl);
@@ -328,6 +329,7 @@ struct bluefs_transaction_t {
    op_bl.claim_append(from.op_bl);
  }

+  void bound_encode(size_t &s) const;
  void encode(ceph::buffer::list& bl) const;
  void decode(ceph::buffer::list::const_iterator& p);
  void dump(ceph::Formatter *f) const;
diff --git a/src/test/objectstore/test_bluefs.cc b/src/test/objectstore/test_bluefs.cc
index 75496a89d2c39..6d3ff1218a437 100644
--- a/src/test/objectstore/test_bluefs.cc
+++ b/src/test/objectstore/test_bluefs.cc
@@ -1459,8 +1459,9 @@ TEST(BlueFS, test_log_runway_2) {
  ASSERT_EQ(0, fs.mount());
  ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, false, false }));
  // longer transaction than current runway
-  std::string longdir(max_log_runway * 2, 'a');
-  std::string longfile(max_log_runway * 2, 'b');
+  size_t name_length = max_log_runway * 2;
+  std::string longdir(name_length, 'a');
+  std::string longfile(name_length, 'b');
  {
    BlueFS::FileWriter *h;
    ASSERT_EQ(0, fs.mkdir(longdir));
@@ -1492,6 +1493,10 @@ TEST(BlueFS, test_log_runway_2) {
  ASSERT_EQ(file_size, 9);
  fs.stat(longdir, longfile, &file_size, &mtime);
  ASSERT_EQ(file_size, 6);
+
+  std::vector<std::string> ls_longdir;
+  fs.readdir(longdir, &ls_longdir);
+  ASSERT_EQ(ls_longdir.front(), longfile);
}

TEST(BlueFS, test_log_runway_3) {
@@ -1,19 +0,0 @@
diff --git a/src/test/erasure-code/TestErasureCodeShec_arguments.cc b/src/test/erasure-code/TestErasureCodeShec_arguments.cc
index 075c6383eed..74403eaf6ed 100644
--- a/src/test/erasure-code/TestErasureCodeShec_arguments.cc
+++ b/src/test/erasure-code/TestErasureCodeShec_arguments.cc
@@ -86,12 +86,12 @@ void create_table_shec432() {
      continue;
    }
    if (std::popcount(avails) == 4) {
-      auto a = to_array<std::initializer_list<int>>({
+      std::vector<std::initializer_list<int>> a = {
        {0,1,2,3}, {0,1,2,4}, {0,1,2,6}, {0,1,3,4}, {0,1,3,6}, {0,1,4,6},
        {0,2,3,4}, {0,2,3,5}, {0,2,4,5}, {0,2,4,6}, {0,2,5,6}, {0,3,4,5},
        {0,3,4,6}, {0,3,5,6}, {0,4,5,6}, {1,2,3,4}, {1,2,3,5}, {1,2,4,5},
        {1,2,4,6}, {1,2,5,6}, {1,3,4,5}, {1,3,4,6}, {1,3,5,6}, {1,4,5,6},
-        {2,3,4,5}, {2,4,5,6}, {3,4,5,6}});
+        {2,3,4,5}, {2,4,5,6}, {3,4,5,6}};
      if (ranges::any_of(a, std::bind_front(cmp_equal<uint, int>, avails),
                         getint)) {
        vec.push_back(avails);
@@ -1,206 +0,0 @@
diff --git a/src/common/LogEntry.h b/src/common/LogEntry.h
index 3ddebbd3043..b9096e2850a 100644
--- a/src/common/LogEntry.h
+++ b/src/common/LogEntry.h
@@ -15,7 +15,11 @@
#ifndef CEPH_LOGENTRY_H
#define CEPH_LOGENTRY_H

+#include <fmt/core.h>
#include <fmt/format.h>
+#if FMT_VERSION >= 90000
+#include <fmt/ostream.h>
+#endif

#include "include/utime.h"
#include "msg/msg_fmt.h"
@@ -194,19 +198,17 @@ inline std::ostream& operator<<(std::ostream& out, const LogEntry& e)
         << e.channel << " " << e.prio << " " << e.msg;
}

-template <> struct fmt::formatter<EntityName> : fmt::formatter<std::string_view> {
-  template <typename FormatContext>
-  auto format(const EntityName& e, FormatContext& ctx) {
-    return formatter<std::string_view>::format(e.to_str(), ctx);
-  }
-};
+template <>
+struct fmt::formatter<clog_type>: fmt::ostream_formatter {};

-template <> struct fmt::formatter<LogEntry> : fmt::formatter<std::string_view> {
-  template <typename FormatContext>
-  auto format(const LogEntry& e, FormatContext& ctx) {
-    return fmt::format_to(ctx.out(), "{} {} ({}) {} : {} {} {}",
-      e.stamp, e.name, e.rank, e.seq, e.channel, e.prio, e.msg);
+template <>
+struct fmt::formatter<EntityName> : fmt::formatter<std::string_view> {
+  auto format(const EntityName& e, format_context& ctx) {
+    return fmt::formatter<std::string_view>::format(e.to_str(), ctx);
  }
};

+template <>
+struct fmt::formatter<LogEntry> : fmt::ostream_formatter {};
+
#endif
diff --git a/src/include/byteorder.h b/src/include/byteorder.h
index eb6d5e102b4..9a4d0be877a 100644
--- a/src/include/byteorder.h
+++ b/src/include/byteorder.h
@@ -53,3 +53,8 @@ inline ceph_les16 init_les16(__s16 x) {
  v = x;
  return v;
}
+
+template <typename T>
+auto format_as(ceph_le<T> c) {
+  return (T)c;
+}
diff --git a/src/include/neorados/RADOS_fmt.hpp b/src/include/neorados/RADOS_fmt.hpp
new file mode 100644
index 00000000000..1512ec965fe
--- /dev/null
+++ b/src/include/neorados/RADOS_fmt.hpp
@@ -0,0 +1,16 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+#pragma once
+/**
+ * \file fmtlib formatters for some neorados types
+ */
+
+#include <fmt/core.h>
+#if FMT_VERSION >= 90000
+#include <fmt/ostream.h>
+#endif
+
+#include <include/neorados/RADOS.hpp>
+
+template <>
+struct fmt::formatter<neorados::Object> : fmt::ostream_formatter {};
diff --git a/src/include/types_fmt.h b/src/include/types_fmt.h
new file mode 100644
index 00000000000..3d40085f0b2
--- /dev/null
+++ b/src/include/types_fmt.h
@@ -0,0 +1,16 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+#pragma once
+/**
+ * \file fmtlib formatters for some types.h classes
+ */
+
+#include <fmt/core.h>
+#if FMT_VERSION >= 90000
+#include <fmt/ostream.h>
+#endif
+
+#include <include/types.h>
+
+template <>
+struct fmt::formatter<shard_id_t> : fmt::ostream_formatter {};
diff --git a/src/osd/SnapMapper.cc b/src/osd/SnapMapper.cc
index 7893bc08fdc..e8d34cd25bc 100644
--- a/src/osd/SnapMapper.cc
+++ b/src/osd/SnapMapper.cc
@@ -211,7 +211,7 @@ string SnapMapper::get_prefix(int64_t pool, snapid_t snap)
  return fmt::sprintf("%s%lld_%.16X_",
                      MAPPING_PREFIX,
                      pool,
-                      snap);
+                      (uint64_t)snap);
}

string SnapMapper::to_raw_key(
@@ -650,7 +650,7 @@ string SnapMapper::make_purged_snap_key(int64_t pool, snapid_t last)
  return fmt::sprintf("%s_%lld_%016llx",
                      PURGED_SNAP_PREFIX,
                      pool,
-                      last);
+                      (uint64_t)last);
}

void SnapMapper::make_purged_snap_key_value(
@@ -866,7 +866,7 @@ string SnapMapper::get_legacy_prefix(snapid_t snap)
{
  return fmt::sprintf("%s%.16X_",
                      LEGACY_MAPPING_PREFIX,
-                      snap);
+                      (uint64_t)snap);
}

string SnapMapper::to_legacy_raw_key(
diff --git a/src/osd/osd_types.h b/src/osd/osd_types.h
index afed5fa8351..e374369e8ba 100644
--- a/src/osd/osd_types.h
+++ b/src/osd/osd_types.h
@@ -35,6 +35,7 @@
#include "msg/msg_types.h"
#include "include/compat.h"
#include "include/types.h"
+#include "include/types_fmt.h"
#include "include/utime.h"
#include "include/CompatSet.h"
#include "common/ceph_context.h"
diff --git a/src/osd/osd_types_fmt.h b/src/osd/osd_types_fmt.h
index 8d48134106e..65a751469f7 100644
--- a/src/osd/osd_types_fmt.h
+++ b/src/osd/osd_types_fmt.h
@@ -57,7 +57,7 @@ struct fmt::formatter<chunk_info_t> {
  constexpr auto parse(format_parse_context& ctx) { return ctx.begin(); }

  template <typename FormatContext>
-  auto format(const chunk_info_t& ci, FormatContext& ctx)
+  auto format(const chunk_info_t& ci, FormatContext& ctx) const
  {
    return fmt::format_to(ctx.out(), "(len: {} oid: {} offset: {} flags: {})",
                          ci.length, ci.oid, ci.offset,
@@ -169,7 +169,7 @@ struct fmt::formatter<pg_info_t> {
  constexpr auto parse(format_parse_context& ctx) { return ctx.begin(); }

  template <typename FormatContext>
-  auto format(const pg_info_t& pgi, FormatContext& ctx)
+  auto format(const pg_info_t& pgi, FormatContext& ctx) const
  {
    fmt::format_to(ctx.out(), "{}({}", pgi.pgid, (pgi.dne() ? " DNE" : ""));
    if (pgi.is_empty()) {
@@ -211,7 +211,7 @@ struct fmt::formatter<SnapSet> {
  }

  template <typename FormatContext>
-  auto format(const SnapSet& snps, FormatContext& ctx)
+  auto format(const SnapSet& snps, FormatContext& ctx) const
  {
    if (verbose) {
      // similar to SnapSet::dump()
@@ -265,7 +265,7 @@ struct fmt::formatter<ScrubMap::object> {

  ///\todo: consider passing the 'D" flag to control snapset dump
  template <typename FormatContext>
-  auto format(const ScrubMap::object& so, FormatContext& ctx)
+  auto format(const ScrubMap::object& so, FormatContext& ctx) const
  {
    fmt::format_to(ctx.out(),
                   "so{{ sz:{} dd:{} od:{} ",
@@ -308,7 +308,7 @@ struct fmt::formatter<ScrubMap> {
  }

  template <typename FormatContext>
-  auto format(const ScrubMap& smap, FormatContext& ctx)
+  auto format(const ScrubMap& smap, FormatContext& ctx) const
  {
    fmt::format_to(ctx.out(),
                   "smap{{ valid:{} incr-since:{} #:{}",
diff --git a/src/tools/neorados.cc b/src/tools/neorados.cc
index 24966d2aee5..44ee1cf199c 100644
--- a/src/tools/neorados.cc
+++ b/src/tools/neorados.cc
@@ -36,6 +36,7 @@
#include "include/buffer.h" // :(

#include "include/neorados/RADOS.hpp"
+#include "include/neorados/RADOS_fmt.hpp"

using namespace std::literals;
@@ -1,8 +0,0 @@
## allow ceph daemons (which run as user ceph) to collect device health metrics
# Since v1.9.10 sudo allows for regex as a part of the COMMAND string.
# vim: ft=sudoers

# Used in src/common/blkdev.cc for collecting device metrics
# https://docs.ceph.com/en/latest/rados/operations/devices/#enabling-monitoring
ceph ALL=NOPASSWD: /usr/bin/smartctl ^-x --json=o /dev/[a-zA-Z0-9/_-]+$
ceph ALL=NOPASSWD: /usr/bin/nvme ^[a-zA-Z0-9_-]+ smart-log-add --json /dev/[a-zA-Z0-9/_-]+$
@@ -1 +0,0 @@
u ceph 340 - /run/ceph
@@ -1,6 +0,0 @@
--- a/src/test/encoding/CMakeLists.txt 2019-04-25 20:15:48.000000000 +0200
+++ b/src/test/encoding/CMakeLists.txt 2019-07-14 21:11:37.380428899 +0200
@@ -1,3 +1,2 @@
# scripts
add_ceph_test(check-generated.sh ${CMAKE_CURRENT_SOURCE_DIR}/check-generated.sh)
-add_ceph_test(readable.sh ${CMAKE_CURRENT_SOURCE_DIR}/readable.sh)
ceph/justfile | 155 lines
@@ -1,155 +0,0 @@
PkgBase := "ceph"
ChrootPath := env_var("HOME") / "chroot"
ChrootBase := ChrootPath / "root"
ChrootActive := ChrootPath / PkgVer + "_" + PkgRel
Scripts := justfile_directory() / "scripts"

Color := env_var_or_default("USE_COLOR", "1")
Chroot := env_var_or_default("USE_CHROOT", "1")

# Default to listing recipes
_default:
    @just --list --list-prefix ' > '

# Build the package in a clean chroot
build:
    @$Say Building @{{PkgBuild}} via chroot
    makechrootpkg -c -r {{ChrootPath}} -d "/tmp:/tmp" -C -n -l {{PkgVer}}_{{PkgRel}}

# Repackage without rebuilding
repackage:
    @$Say Repackaging @{{PkgBuild}} via chroot
    makechrootpkg -r {{ChrootPath}} -l {{PkgVer}}_{{PkgRel}} -- --skipint --noprepare --noextract --nocheck --repackage --force

# Run a command in the chroot environment
cexec +args: (_cexec "." args)

# Run ctest in the chroot environment
ctest *args: (_cexec "build" "ctest" args)

# Create and update the base chroot
chroot: (_update_chroot ChrootBase)

# Initialize the base chroot for building packages
mkchroot: (_mkchroot ChrootBase)

# Install required dependencies
deps:
    pacman -S base-devel util-linux sudo devtools ripgrep --needed --noconfirm

# Clean one or more of: chroot|deps|artifacts|logs
clean +what="chroot":
    #!/usr/bin/env bash
    set -euo pipefail

    $Say "cleaning directive(s): {{what}}"
    for item in {{what}}; do
      case $item in
        chroot|c)
          (set -x; rm -rf {{ChrootActive}})
          ;;
        deps|d)
          (set -x; pacman -Rsc devtools --needed --noconfirm)
          ;;
        artifacts|a)
          (set -x; rm -vf *tar.*)
          ;;
        logs|l)
          (set -x; rm -vf *.log)
          ;;
        *)
          $Say unknown clean directive $item, ignoring
          ;;
      esac
    done

# Upload built artifacts to Github, using the associated release
upload pkg="@all": (_upload pkg)

# Initialize the chroot
@_mkchroot $cbase:
    {{ if path_exists(cbase) == "true" { ":" } else { "$Say Initializing chroot @$cbase" } }}
    {{ if path_exists(cbase) == "true" { ":" } else { "mkarchroot $cbase base-devel" } }}

# Update dependencies in the base chroot
@_update_chroot $cbase: (_mkchroot cbase)
    $Say Updating chroot packages @$cbase
    arch-nspawn $cbase pacman -Syu

# Exec into the chroot to a path relative to the workdir, and run the given args
_cexec path +args:
    arch-nspawn {{ChrootActive}} --chdir /build/{{PkgBase}}/src/{{PkgBase}}-{{PkgVer}}/{{path}} sh -c {{quote(trim(args))}}

# Script to upload a comma separated list of packages to the active Github release
_upload $pkgstring:
    #!/usr/bin/env bash
    set -euo pipefail

    [[ -v GITHUB_TOKEN ]] || { $Say "Error: GITHUB_TOKEN must be set" && exit 4; }

    IFS=', ' read -r -a PKGS <<<"$pkgstring"
    if printf '%s\0' "${PKGS[@]}" | grep -zxqF -- '@all'; then
      $Say Expanding '@all' to package set
      PKGS=($(rg -P --only-matching --replace '$1' '^package_(.+)\(\) {' {{PkgBuild}} | sort | xargs))
    fi

    $Say "Uploading ${#PKGS[@]} package(s) to {{GithubRepo}}/releases/v{{PkgVer}}-{{PkgRel}}"
    printf ' > %s %s %s %s %s\n' "${PKGS[@]}" | column -t

    declare -A FILES
    for pkg in "${PKGS[@]}"; do
      fname="$pkg-{{PkgVer}}-{{PkgRel}}-{{PkgArch}}.pkg.tar.zst"
      [[ -f "$fname" ]] || { $Say "Error: unable to locate artifact '$fname' for '$pkg' upload" && exit 7; }
      FILES[$pkg]=$fname
    done

    for pkg in "${!FILES[@]}"; do
      src=${FILES[$pkg]}
      mime="$(file -b --mime-type $src)"
      dst="${pkg//-/_}_linux_{{PkgArch}}.tar.$(basename $mime)"

      $Say "uploading '$src' as '$dst'"

      {{Scripts}}/gh-upload-artifact.sh \
        ${DryRun:+--dry-run} \
        --repo {{GithubRepo}} \
        --tag v{{PkgVer}}-{{PkgRel}} \
        --file "$src:$dst"
    done

# ~~~ Global shell variables ~~~
export Say := "echo " + C_RED + "==> " + C_RESET + BuildId
export DryRun := None
export Debug := None

# Nicer name for empty strings
None := ""

# ~~~ Contextual information ~~~
PkgBuild := justfile_directory() / "PKGBUILD"
PkgVer := `awk -F= '/pkgver=/ {print $2}' PKGBUILD`
PkgRel := `awk -F= '/pkgrel=/ {print $2}' PKGBUILD`
PkgArch := 'x86_64'
GitCommitish := if `git tag --points-at HEAD` != None {
    `git tag --points-at HEAD`
} else if `git branch --show-current` != None {
    `git branch --show-current`
} else {
    `git rev-parse --short HEAD`
}
BuildId := "[" + C_YELLOW + PkgBase + C_RESET + "/" + C_GREEN + PkgVer + ":" + PkgRel + C_RESET + "@" + C_CYAN + GitCommitish + C_RESET + "]"
BuildTriple := PkgVer + "-" + PkgRel + "-" + "x86_64"
GithubRepo := "bazaah/aur-ceph"

# ~~~ Color Codes ~~~
C_ENABLED := if Color =~ '(?i)^auto|yes|1$' { "1" } else { None }

C_RESET := if C_ENABLED == "1" { `echo -e "\033[0m"` } else { None }
C_BLACK := if C_ENABLED == "1" { `echo -e "\033[0;30m"` } else { None }
C_RED := if C_ENABLED == "1" { `echo -e "\033[0;31m"` } else { None }
C_GREEN := if C_ENABLED == "1" { `echo -e "\033[0;32m"` } else { None }
C_YELLOW := if C_ENABLED == "1" { `echo -e "\033[0;33m"` } else { None }
C_BLUE := if C_ENABLED == "1" { `echo -e "\033[0;34m"` } else { None }
C_MAGENTA := if C_ENABLED == "1" { `echo -e "\033[0;35m"` } else { None }
C_CYAN := if C_ENABLED == "1" { `echo -e "\033[0;36m"` } else { None }
C_WHITE := if C_ENABLED == "1" { `echo -e "\033[0;37m"` } else { None }
@ -1,150 +0,0 @@
#!/usr/bin/env bash

usage() {
  cat <<EOF
$0 --repo=<owner/repo> --tag=<release tag | @> --file=<artifact path> --token=<github api token>

Usage:

This script uploads the given artifact to a GitHub release.

It takes 4 required options, plus two optional flags:

--repo    | The <org>/<repo> combo to upload the artifact to      env:\$GITHUB_REPO     =${repo:-<unset>}
--tag     | The tag / release name to upload the artifact to      env:\$GITHUB_RELEASE  =${tag:-<unset>}
--file    | The file path to the artifact to upload               env:\$GITHUB_ARTIFACT =${artifact:-<unset>}
--token   | The API token to use when authenticating with GitHub  env:\$GITHUB_TOKEN    =$([[ -n "$token" ]] && printf '<*****>' || printf '<unset>')
--dry-run | Don't actually upload the artifact                    env:\$GITHUB_DRYRUN   =$([[ -n "$dry" ]] && printf 'true' || printf 'false')
--help    | Print this help

Each option may also be set via the associated environment variable, with the command line taking precedence.

You may optionally force the uploaded artifact to have a different name from the given file, by suffixing --file
with a ':<name-override>'. For example, '/path/to/artifact_with.dirty+name.tar.gz:artifact.tar.gz' will resolve
the uploaded artifact's filename to 'artifact.tar.gz'.

To select the most recent tag on the remote, use the special value '@' when setting --tag.

Examples:

1. $(basename $0) --tag=v1.0.0 --repo github/octocat --file ./cute-kat.jpg --token=dae3re34...

2. GITHUB_TOKEN=4wf4dec... GITHUB_ARTIFACT=/assets/cats.json $(basename $0) --repo=github/octocat --tag=v2.0.0

3. $(basename $0) --tag=@ --repo github/octocat --file ./dirty_m3sszy.kitty.tar.xz:kitty.tar.xz --token f43rffe...

EOF
}

requires() {
  local t v efails xfails emsg
  for t in "$@"; do
    v=${t#*:}

    case $t in
      e:*)
        [[ -n "${!v}" ]] || efails+=("$v")
        ;;
      x:*|*)
        command -v "${v}" &>/dev/null || xfails+=("$v")
        ;;
    esac
  done

  [[ -n "${efails}" ]] && emsg=$(printf 'Missing required variable(s): %s' "${efails[*]}")
  [[ -n "${xfails}" ]] && emsg=$(printf '%s\nMissing required executable(s): %s' "$emsg" "${xfails[*]}")
  [[ -n "$emsg" ]] && die 3 "$(printf '%s\n[INFO]: Use --help for more information' "$emsg")"
}
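
# For example, the two calls made from setup() below:
#   requires x:getopt x:curl x:jq             # each name must resolve to an executable
#   requires e:repo e:tag e:artifact e:token  # each name must be a non-empty variable
# Failures accumulate, so a single message reports everything that is missing.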

cli() {
  local GOPTS=$(getopt -o hd --long help,dry-run,repo:,tag:,file:,token: -n "gh-upload-artifact" -- "$@")
  eval set -- "$GOPTS"

  while (( $# > 0 )); do
    case "$1" in
      --repo) repo="$2"; shift 2 ;;
      --tag) tag="$2"; shift 2 ;;
      --file) artifact="$2"; shift 2 ;;
      --token) token="$2"; shift 2 ;;
      -h|--help) help=1; shift ;;
      -d|--dry-run) dry=1; shift ;;
      --) shift; break ;;
      *) die 1 "Unknown option: $1" 1 ;;
    esac
  done
}

set_filename() {
  artifact=${1%:*}
  filename=${1##*:}
  # Without a ':<name-override>' suffix the expansions above return $1
  # unchanged, so reset and fall back to the file's basename.
  [[ "$1" == *:* ]] || filename=""
  [[ -n "$filename" ]] || filename=$(basename "$artifact")
}
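
# Illustrative resolutions (paths are hypothetical):
#   set_filename '/tmp/build_with.dirty+name.tar.gz:artifact.tar.gz'
#     -> artifact='/tmp/build_with.dirty+name.tar.gz', filename='artifact.tar.gz'
#   set_filename '/assets/cats.json'
#     -> artifact='/assets/cats.json', filename='cats.json'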

die() {
  echo "[ERROR]: $2" >&2
  [[ -n "$3" ]] && usage >&2
  exit $1
}

setup() {
  requires x:getopt x:curl x:jq

  # Script inputs
  repo=${GITHUB_REPO}
  tag=${GITHUB_RELEASE}
  artifact=${GITHUB_ARTIFACT}
  token=${GITHUB_TOKEN}
  dry=${GITHUB_DRYRUN:+1}
  filename=""
  help=-1

  cli "$@"
  (( help > 0 )) && usage && exit 0

  requires e:repo e:tag e:artifact e:token

  set_filename "$artifact"

  # Constants
  GH_RESPONSE=$(mktemp)
  GH_AUTH="Authorization: token $token"
  CURL_ARGS="-sSL"
  [[ "$tag" == '@' ]] \
    && GH_TAG="latest" \
    || GH_TAG="tags/$tag"
}
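
# The tag then selects the GitHub API path queried in main(), e.g.:
#   --tag=@      -> https://api.github.com/repos/<repo>/releases/latest
#   --tag=v1.0.0 -> https://api.github.com/repos/<repo>/releases/tags/v1.0.0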

main() {
  setup "$@"
  trap 'rm -f $GH_RESPONSE' EXIT

  # Fetch release JSON blob
  curl "$CURL_ARGS" -o "$GH_RESPONSE" -H "$GH_AUTH" "https://api.github.com/repos/$repo/releases/$GH_TAG"
  (( $? > 0 )) && die 1 "$(cat <(printf "Invalid repo, token or network issue!\n") $GH_RESPONSE)"

  # Get the release ID for the resolved tag
  id=$(jq -r '.id' "$GH_RESPONSE")
  (( $? > 0 )) && die 1 "$(cat <(printf "Failed to get release id for tag '%s' from response:\n" "$tag") $GH_RESPONSE)"

  # Construct url
  upload_uri="https://uploads.github.com/repos/$repo/releases/$id/assets?name=$filename"

  # Upload artifact
  if [[ -n "$dry" ]]; then
    echo "[INFO]: Would upload $artifact to $upload_uri" >&2
  else
    curl "$CURL_ARGS" \
      -H "$GH_AUTH" \
      -H "Content-Type: $(file -b --mime-type "$artifact")" \
      --data-binary @"$artifact" \
      -o "$GH_RESPONSE" \
      "$upload_uri"

    (( $? > 0 )) \
      && die 1 "$(cat <(printf "Failed to upload $filename to $repo:\n") $GH_RESPONSE)" \
      || jq '.' "$GH_RESPONSE"
  fi
}

[[ -n "$Debug" ]] && set -x
main "$@"
@ -1,27 +0,0 @@
From b92b17ecced6df463da73d6de566740cf5cd00d4 Mon Sep 17 00:00:00 2001
From: Marek Kasik <mkasik@redhat.com>
Date: Fri, 1 Feb 2013 15:19:58 +0100
Subject: [PATCH 1/2] Poll files on nfs4

Add nfs4 among polled filesystems.

https://bugzilla.gnome.org/show_bug.cgi?id=693006
---
 server/gam_fs.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/server/gam_fs.c b/server/gam_fs.c
index c8ca704..143a603 100644
--- a/server/gam_fs.c
+++ b/server/gam_fs.c
@@ -178,6 +178,7 @@ gam_fs_init (void)
         gam_fs_set ("reiserfs", GFS_MT_DEFAULT, 0);
         gam_fs_set ("novfs", GFS_MT_POLL, 30);
         gam_fs_set ("nfs", GFS_MT_POLL, 5);
+        gam_fs_set ("nfs4", GFS_MT_POLL, 5);
         if (stat("/etc/mtab", &mtab_sbuf) != 0)
         {
             GAM_DEBUG(DEBUG_INFO, "Could not stat /etc/mtab\n");
--
1.8.1.2

@ -1,70 +0,0 @@
From cc14440eface093548cb3bc7814da11d9a99d283 Mon Sep 17 00:00:00 2001
From: Anssi Hannula <anssi@mageia.org>
Date: Wed, 4 Jan 2012 00:23:55 +0200
Subject: [PATCH] fix possible server deadlock in ih_sub_cancel

ih_sub_foreach() calls ih_sub_cancel() while inotify_lock is locked.
However, ih_sub_cancel() locks it again, and locking GMutex recursively
causes undefined behaviour.

Fix that by removing locking from ih_sub_cancel() as ih_sub_foreach()
is its only user. Also make the function static so that it won't
accidentally get used by other files without locking (inotify-helper.h
is an internal server header).

This should fix the intermittent deadlocks I've been experiencing
causing KDE applications to no longer start, and probably also
http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=542361

Origin: http://bugzilla-attachments.gnome.org/attachment.cgi?id=204537
Bug-Ubuntu: https://bugs.launchpad.net/ubuntu/+source/gamin/+bug/926862
Bug-Debian: http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=542361

---
 server/inotify-helper.c | 7 ++-----
 server/inotify-helper.h | 1 -
 2 files changed, 2 insertions(+), 6 deletions(-)

diff --git a/server/inotify-helper.c b/server/inotify-helper.c
index d77203e..0789fa4 100644
--- a/server/inotify-helper.c
+++ b/server/inotify-helper.c
@@ -123,13 +123,11 @@ ih_sub_add (ih_sub_t * sub)
 
 /**
  * Cancels a subscription which was being monitored.
+ * inotify_lock must be held when calling.
  */
-gboolean
+static gboolean
 ih_sub_cancel (ih_sub_t * sub)
 {
-	G_LOCK(inotify_lock);
-
-
 	if (!sub->cancelled)
 	{
 		IH_W("cancelling %s\n", sub->pathname);
@@ -140,7 +138,6 @@ ih_sub_cancel (ih_sub_t * sub)
 		sub_list = g_list_remove (sub_list, sub);
 	}
 
-	G_UNLOCK(inotify_lock);
 	return TRUE;
 }
 
diff --git a/server/inotify-helper.h b/server/inotify-helper.h
index 5d3b6d0..d36b5fd 100644
--- a/server/inotify-helper.h
+++ b/server/inotify-helper.h
@@ -34,7 +34,6 @@ gboolean ih_startup (event_callback_t ecb,
 		     found_callback_t fcb);
 gboolean ih_running (void);
 gboolean ih_sub_add (ih_sub_t *sub);
-gboolean ih_sub_cancel (ih_sub_t *sub);
 
 /* Return FALSE from 'f' if the subscription should be cancelled */
 void ih_sub_foreach (void *callerdata, gboolean (*f)(ih_sub_t *sub, void *callerdata));
--
1.7.7.2

@ -1,53 +0,0 @@
# POWER Maintainer: Alexander Baldeck <alex.bldck@gmail.com>
# Maintainer:
# Contributor: Abhishek Dasgupta <abhidg@gmail.com>
# Contributor: Pulphix <crimea.v@libero.it>

pkgname=gamin
pkgver=0.1.10
pkgrel=10
pkgdesc='File and directory monitoring system defined to be a subset of the FAM (File Alteration Monitor)'
url='http://www.gnome.org/~veillard/gamin'
license=('GPL')
arch=(x86_64 powerpc64le)
depends=('glib2')
makedepends=('python2')
optdepends=('python2: for the python module')
provides=('fam')
conflicts=('fam')
source=("$url/sources/${pkgname}-${pkgver}.tar.gz"
        'fix-deprecated-const.patch'
        '18_gam_server_deadlocks.patch'
        '0001-Poll-files-on-nfs4.patch')
sha512sums=('21bfe6fcf8fb3117cd5a08c8ce3b8d0d1dd23e478e60a95b76c20d02cc29b050dde086578d81037990484ff891c3e104d2cbbf3d294b4a79346b14a0cae075bb'
            'c4c10bee70c7231db395cbfe5bdf513ade6be599a11a9d35888ddfaca42d619fe2b5e87c2b2bab469ea98ba718bc01711252313ba5f53c392379b669f5b2902b'
            'ae2d3f3cd16e2da05836cbb2f21527896db5d5067ef4b120e943693234a685527eff528955ed80120265ca70e04a88cc28413cc34311d6faa068c620339fad38'
            'dcb23fd68e106a1b578235ef0b01b49773908ca6ded706610103f880f77a2aa0b0403cb720b9c6b062bac71e9d66cd2288b489c558839fc23295b18635bf399f')

prepare() {
  cd $pkgname-$pkgver

  # https://bugs.archlinux.org/task/33642
  patch -Np1 -i ../18_gam_server_deadlocks.patch

  patch -Np1 -i ../fix-deprecated-const.patch
  patch -Np1 -i ../0001-Poll-files-on-nfs4.patch

  # python 2
  sed -i 's_#!/usr/bin/env python_#!/usr/bin/env python2_' python/gamin.py
}

build() {
  cd $pkgname-$pkgver
  ./configure --build=$CHOST --prefix=/usr --disable-static --with-threads \
    --disable-debug-api --disable-debug --libexecdir=/usr/lib/gamin \
    --with-python=/usr/bin/python2
  make
}

package() {
  cd $pkgname-$pkgver
  make DESTDIR="$pkgdir" install
}

# vim:set ts=2 sw=2 et:
@ -1,56 +0,0 @@
Description: Don't use deprecated G_CONST_RETURN. Fixes building with newer glib versions.
Author: Matthias Klose <doko@ubuntu.com>
Bug-Ubuntu: https://launchpad.net/bugs/829504

Index: gamin/server/gam_node.c
===================================================================
--- gamin.orig/server/gam_node.c	2011-10-18 16:09:04.873780685 +0200
+++ gamin/server/gam_node.c	2011-10-18 16:09:01.965780543 +0200
@@ -122,7 +122,7 @@
  * it has finished with the string. If it must keep it longer, it
  * should makes its own copy. The returned string must not be freed.
  */
-G_CONST_RETURN char *
+const char *
 gam_node_get_path(GamNode * node)
 {
     g_assert(node);
Index: gamin/server/gam_node.h
===================================================================
--- gamin.orig/server/gam_node.h	2011-10-18 16:09:04.729780677 +0200
+++ gamin/server/gam_node.h	2011-10-18 16:09:01.961780544 +0200
@@ -58,7 +58,7 @@
 void gam_node_set_is_dir (GamNode *node,
                           gboolean is_dir);
 
-G_CONST_RETURN char *gam_node_get_path (GamNode *node);
+const char *gam_node_get_path (GamNode *node);
 
 GList *gam_node_get_subscriptions (GamNode *node);
 
Index: gamin/server/gam_subscription.c
===================================================================
--- gamin.orig/server/gam_subscription.c	2011-10-18 16:09:04.817780682 +0200
+++ gamin/server/gam_subscription.c	2011-10-18 16:09:01.965780543 +0200
@@ -141,7 +141,7 @@
  * @param sub the GamSubscription
 * @returns The path being monitored. It should not be freed.
 */
-G_CONST_RETURN char *
+const char *
 gam_subscription_get_path(GamSubscription * sub)
 {
     if (sub == NULL)
Index: gamin/server/gam_subscription.h
===================================================================
--- gamin.orig/server/gam_subscription.h	2011-10-18 16:09:04.929780687 +0200
+++ gamin/server/gam_subscription.h	2011-10-18 16:09:01.965780543 +0200
@@ -21,7 +21,7 @@
 
 int gam_subscription_get_reqno (GamSubscription *sub);
 
-G_CONST_RETURN char *gam_subscription_get_path (GamSubscription *sub);
+const char *gam_subscription_get_path (GamSubscription *sub);
 
 GamListener *gam_subscription_get_listener (GamSubscription *sub);
 
27
libayatana-appindicator/.SRCINFO
Normal file
@ -0,0 +1,27 @@
pkgbase = libayatana-appindicator
	pkgdesc = Ayatana Application Indicators shared library
	pkgver = 0.5.93
	pkgrel = 1.1
	url = https://github.com/AyatanaIndicators/libayatana-appindicator
	arch = x86_64
	arch = powerpc64le
	arch = powerpc64
	arch = powerpc
	arch = riscv64
	license = LGPL2.1
	license = LGPL3
	makedepends = cmake
	makedepends = glib2-devel
	makedepends = gobject-introspection
	makedepends = vala
	depends = gcc-libs
	depends = glib2
	depends = glibc
	depends = gtk3
	depends = libayatana-indicator
	depends = libdbusmenu-glib
	depends = libdbusmenu-gtk3
	source = https://github.com/AyatanaIndicators/libayatana-appindicator/archive/0.5.93/libayatana-appindicator-0.5.93.tar.gz
	sha256sums = cbefed7a918a227bf71286246e237fcd3a9c8499b3eaac4897811a869409edf0

pkgname = libayatana-appindicator
@ -1,40 +0,0 @@
# POWER Maintainer: Alexander Baldeck <alex.bldck@gmail.com>
# Maintainer: Jan Alexander Steffens (heftig) <jan.steffens@gmail.com>
# Contributor: Gustavo Alvarez <sl1pkn07@gmail.com>
# Contributor: Panagiotis Papadopoulos pano_90 AT gmx DOT net

pkgname=libbs2b
pkgver=3.1.0
pkgrel=7.1
pkgdesc="Bauer stereophonic-to-binaural DSP effect library"
url="http://bs2b.sourceforge.net"
arch=(x86_64 powerpc64le powerpc64 powerpc riscv64)
license=('custom:MIT')
depends=(libsndfile)
provides=(libbs2b.so)
source=("https://downloads.sourceforge.net/sourceforge/bs2b/$pkgname-$pkgver.tar.lzma")
md5sums=('00d32ffa6461dde6a632c846da3e0a13')
sha1sums=('ec847e38a3a6f7eeed245b44e53f02cfff056df0')

prepare() {
  cd $pkgname-$pkgver
  sed -i 's/dist-lzma/dist-xz/g' configure.ac
  autoreconf -fvi
}

build() {
  cd $pkgname-$pkgver
  ./configure --prefix=/usr
  sed -i -e 's/ -shared / -Wl,-O1,--as-needed\0/g' libtool
  make
}

check() {
  cd $pkgname-$pkgver
  make check
}

package() {
  cd $pkgname-$pkgver
  make DESTDIR="$pkgdir" install
  install -Dm644 COPYING "$pkgdir/usr/share/licenses/$pkgname/LICENSE"
}
44
vtk/fmt-11.patch
Normal file
@ -0,0 +1,44 @@
diff -ru VTK-9.3.1.orig/ThirdParty/ioss/vtkioss/Ioss_IOFactory.C VTK-9.3.1/ThirdParty/ioss/vtkioss/Ioss_IOFactory.C
--- VTK-9.3.1.orig/ThirdParty/ioss/vtkioss/Ioss_IOFactory.C	2024-09-08 10:39:43.785130427 +0200
+++ VTK-9.3.1/ThirdParty/ioss/vtkioss/Ioss_IOFactory.C	2024-09-08 10:42:08.072087946 +0200
@@ -12,6 +12,7 @@
 #include <cstddef> // for nullptr
 #include "vtk_fmt.h"
 #include VTK_FMT(fmt/ostream.h)
+#include VTK_FMT(fmt/ranges.h)
 #include <map> // for _Rb_tree_iterator, etc
 #include <ostream> // for basic_ostream, etc
 #include <set>
diff -ru VTK-9.3.1.orig/ThirdParty/ioss/vtkioss/Ioss_StructuredBlock.C VTK-9.3.1/ThirdParty/ioss/vtkioss/Ioss_StructuredBlock.C
--- VTK-9.3.1.orig/ThirdParty/ioss/vtkioss/Ioss_StructuredBlock.C	2024-09-08 10:39:43.788463820 +0200
+++ VTK-9.3.1/ThirdParty/ioss/vtkioss/Ioss_StructuredBlock.C	2024-09-08 10:57:28.625305675 +0200
@@ -15,6 +15,7 @@
 #include <Ioss_StructuredBlock.h>
 #include "vtk_fmt.h"
 #include VTK_FMT(fmt/ostream.h)
+#include VTK_FMT(fmt/ranges.h)
 
 #include <cstddef> // for size_t
 #include <numeric>
diff -ru VTK-9.3.1.orig/ThirdParty/ioss/vtkioss/Ioss_Utils.C VTK-9.3.1/ThirdParty/ioss/vtkioss/Ioss_Utils.C
--- VTK-9.3.1.orig/ThirdParty/ioss/vtkioss/Ioss_Utils.C	2024-09-08 10:39:43.791797211 +0200
+++ VTK-9.3.1/ThirdParty/ioss/vtkioss/Ioss_Utils.C	2024-09-08 10:47:06.456187726 +0200
@@ -21,6 +21,7 @@
 #include VTK_FMT(fmt/chrono.h)
 #include VTK_FMT(fmt/format.h)
 #include VTK_FMT(fmt/ostream.h)
+#include VTK_FMT(fmt/ranges.h)
 #include <fstream>
 #include <sstream>
 #include <string>
diff -ru VTK-9.3.1.orig/ThirdParty/ioss/vtkioss/Ioss_ZoneConnectivity.C VTK-9.3.1/ThirdParty/ioss/vtkioss/Ioss_ZoneConnectivity.C
--- VTK-9.3.1.orig/ThirdParty/ioss/vtkioss/Ioss_ZoneConnectivity.C	2024-09-08 10:39:43.791797211 +0200
+++ VTK-9.3.1/ThirdParty/ioss/vtkioss/Ioss_ZoneConnectivity.C	2024-09-08 10:51:18.889659250 +0200
@@ -9,6 +9,7 @@
 #include <cstddef> // for size_t
 #include "vtk_fmt.h"
 #include VTK_FMT(fmt/ostream.h)
+#include VTK_FMT(fmt/ranges.h)
 #include <string> // for string
 #include <vector> // for vector