depot/third_party/nixpkgs/pkgs/applications/misc/cura/lulzbot/curaengine-openmp-compat.patch

# Notes by Charles Duffy <charles@dyfis.net> --
#
# - The new version of OpenMP does not allow outside variables to be referenced
# *at all* without an explicit declaration of how they're supposed to be
# handled. Thus, this was an outright build failure beforehand. The new
# pragmas copy the initial value from the outer scope into each parallel
# thread. Since these variables are all constant within the loops, this is
# clearly correct. (Not sure it's *optimal*, but quite sure it isn't
# *wrong*.) A minimal illustration follows at the end of these notes.
# - Upstream has been contacted -- I'm a Lulzbot customer with an active
# support contract and sent them the patch. That said, they're in the middle
# of some major corporate churn (sold themselves out of near-bankruptcy to an
# out-of-state business entity formed as a holding company; moved to that
# state; have been slowly restaffing after), so a response may take a while.
# - The patch is purely my own work.
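#
# Minimal illustration (my own sketch, not CuraEngine code; the names `bump`,
# `items`, and `limit` are made up). Compiled with -fopenmp on a newer GCC,
# the commented-out pragma is rejected because `limit` is referenced inside a
# default(none) region with no explicit data-sharing clause; firstprivate(limit)
# gives each thread its own copy of the value from the enclosing scope, which
# is the same shape of fix applied to the loop bounds and other loop-invariant
# values in the hunks below:
#
#     #include <cstddef>
#     #include <vector>
#
#     void bump(std::vector<int>& items)
#     {
#         const std::size_t limit = items.size();
#
#         // Rejected by newer GCC: `limit` has no data-sharing clause.
#         // #pragma omp parallel for default(none) shared(items) schedule(dynamic)
#
#         // Accepted: each thread gets a private copy of `limit`, initialized
#         // from the value in the enclosing scope.
#         #pragma omp parallel for default(none) firstprivate(limit) shared(items) schedule(dynamic)
#         for (std::size_t i = 0; i < limit; i++)
#             items[i] += 1;
#     }
#
# (shared(limit) would also have been accepted; firstprivate merely copies a
# read-only, loop-invariant value into each thread, which is harmless here.)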
--- curaengine/src/support.cpp.orig 2020-03-28 10:38:01.953912363 -0500
+++ curaengine/src/support.cpp 2020-03-28 10:45:28.999791908 -0500
@@ -854,7 +854,7 @@
const double tan_angle = tan(angle) - 0.01; // the XY-component of the supportAngle
xy_disallowed_per_layer[0] = storage.getLayerOutlines(0, false).offset(xy_distance);
// for all other layers (of non support meshes) compute the overhang area and possibly use that when calculating the support disallowed area
- #pragma omp parallel for default(none) shared(xy_disallowed_per_layer, storage, mesh) schedule(dynamic)
+ #pragma omp parallel for default(none) firstprivate(layer_count, is_support_mesh_place_holder, use_xy_distance_overhang, z_distance_top, tan_angle, xy_distance, xy_distance_overhang) shared(xy_disallowed_per_layer, storage, mesh) schedule(dynamic)
for (unsigned int layer_idx = 1; layer_idx < layer_count; layer_idx++)
{
Polygons outlines = storage.getLayerOutlines(layer_idx, false);
@@ -1054,7 +1054,7 @@
const int max_checking_layer_idx = std::min(static_cast<int>(storage.support.supportLayers.size())
, static_cast<int>(layer_count - (layer_z_distance_top - 1)));
const size_t max_checking_idx_size_t = std::max(0, max_checking_layer_idx);
-#pragma omp parallel for default(none) shared(support_areas, storage) schedule(dynamic)
+#pragma omp parallel for default(none) firstprivate(max_checking_idx_size_t, layer_z_distance_top) shared(support_areas, storage) schedule(dynamic)
for (size_t layer_idx = 0; layer_idx < max_checking_idx_size_t; layer_idx++)
{
support_areas[layer_idx] = support_areas[layer_idx].difference(storage.getLayerOutlines(layer_idx + layer_z_distance_top - 1, false));
--- curaengine/src/layerPart.cpp.orig 2020-03-28 10:36:40.381023651 -0500
+++ curaengine/src/layerPart.cpp 2020-03-28 10:39:54.584140465 -0500
@@ -49,7 +49,7 @@
{
const auto total_layers = slicer->layers.size();
assert(mesh.layers.size() == total_layers);
-#pragma omp parallel for default(none) shared(mesh, slicer) schedule(dynamic)
+#pragma omp parallel for default(none) firstprivate(total_layers) shared(mesh, slicer) schedule(dynamic)
for (unsigned int layer_nr = 0; layer_nr < total_layers; layer_nr++)
{
SliceLayer& layer_storage = mesh.layers[layer_nr];