#!/usr/bin/env python3

# Copyright Materialize, Inc. and contributors. All rights reserved.
#
# Use of this software is governed by the Business Source License
# included in the LICENSE file at the root of this repository.
#
# As of the Change Date specified in that file, in accordance with
# the Business Source License, use of this software will be governed
# by the Apache License, Version 2.0.

import os
from concurrent.futures import ThreadPoolExecutor, wait
from pathlib import Path

from materialize import bazel, mzbuild, spawn, ui
from materialize.ci_util.upload_debug_symbols_to_s3 import (
    DEBUGINFO_BINS,
    upload_debuginfo_to_s3,
)
from materialize.mzbuild import CargoBuild, Repository, ResolvedImage
from materialize.rustc_flags import Sanitizer
from materialize.xcompile import Arch
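

# Entry point for this CI step: build and publish every mzbuild image, then
# upload debug symbols and annotate the Buildkite build with the results.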
def main() -> None:
    try:
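        # Build configuration is passed in through CI environment variables.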
        coverage = ui.env_is_truthy("CI_COVERAGE_ENABLED")
        sanitizer = Sanitizer[os.getenv("CI_SANITIZER", "none")]
        bazel = ui.env_is_truthy("CI_BAZEL_BUILD")
        bazel_remote_cache = os.getenv("CI_BAZEL_REMOTE_CACHE")
        bazel_lto = ui.env_is_truthy("CI_BAZEL_LTO")

        repo = mzbuild.Repository(
            Path("."),
            coverage=coverage,
            sanitizer=sanitizer,
            bazel=bazel,
            bazel_remote_cache=bazel_remote_cache,
            bazel_lto=bazel_lto,
        )

        # Build and push any images that are not already available on Docker
        # Hub, so they are accessible to other build agents.
        print("--- Acquiring mzbuild images")
        built_images = set()
        deps = repo.resolve_dependencies(image for image in repo if image.publish)
        deps.ensure(post_build=lambda image: built_images.add(image))
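
        # The debuginfo upload and the Buildkite annotation are independent,
        # so run them concurrently.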
        with ThreadPoolExecutor(max_workers=2) as executor:
            futures = [
                executor.submit(maybe_upload_debuginfo, repo, built_images),
                executor.submit(annotate_buildkite_with_tags, repo.rd.arch, deps),
            ]
            # Wait until all tasks are complete.
            wait(futures)
    except BaseException:
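        # Record the failure in Buildkite metadata before re-raising the
        # exception, so the failed step can be identified later.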
        if step_key := os.getenv("BUILDKITE_STEP_KEY"):
            spawn.runv(
                [
                    "buildkite-agent",
                    "meta-data",
                    "set",
                    step_key,
                    "failed",
                ]
            )
        raise


def annotate_buildkite_with_tags(arch: Arch, deps: mzbuild.DependencySet) -> None:
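    """Annotate the Buildkite build with the Docker tags produced for `arch`."""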
    tags = "\n".join([f"* `{dep.spec()}`" for dep in deps])
    markdown = f"""<details><summary>{arch} Docker tags produced in this build</summary>

{tags}
</details>"""
    spawn.runv(
        ["buildkite-agent", "annotate", "--style=info", f"--context=build-{arch}"],
        stdin=markdown.encode(),
    )


def maybe_upload_debuginfo(
    repo: mzbuild.Repository, built_images: set[ResolvedImage], max_tries: int = 3
) -> None:
    """Uploads debuginfo to `DEBUGINFO_S3_BUCKET` and Polar Signals if any
    DEBUGINFO_BINS were built."""
    # Find all binaries created by the `cargo-bin` pre-image.
    bins, bazel_bins = find_binaries_created_by_cargo_bin(
        repo, built_images, DEBUGINFO_BINS
    )
    if len(bins) == 0:
        print("No debuginfo bins were built")
        return
    ui.section(f"Uploading debuginfo for {', '.join(bins)}...")

    is_tag_build = ui.env_is_truthy("BUILDKITE_TAG")
    for bin in bins:
        bin_path = get_bin_path(repo, bin, bazel_bins)
        dbg_path = bin_path.with_suffix(bin_path.suffix + ".debug")
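        # Split the debug info into a separate `.debug` file with objcopy.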
        spawn.runv(
            [
                *repo.rd.tool("objcopy"),
                bin_path,
                dbg_path,
                "--only-keep-debug",
            ],
        )

        # Upload the binary and debuginfo to the S3 bucket, regardless of
        # whether this is a tag build. S3 is cheap.
        build_id = upload_debuginfo_to_s3(bin_path, dbg_path, is_tag_build)
        print(f"Uploaded debuginfo to S3 with build_id {build_id}")


def find_binaries_created_by_cargo_bin(
    repo: Repository, built_images: set[ResolvedImage], bin_names: set[str]
) -> tuple[set[str], dict[str, str]]:
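    """Return the subset of `bin_names` built by a `cargo-bin` pre-image,
    plus a mapping from binary name to Bazel target when Bazel is enabled."""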
    bins: set[str] = set()
    bazel_bins: dict[str, str] = dict()
    for image in built_images:
        for pre_image in image.image.pre_images:
            if isinstance(pre_image, CargoBuild):
                for bin in pre_image.bins:
                    if bin in bin_names:
                        bins.add(bin)
                        if repo.rd.bazel:
                            bazel_bins[bin] = pre_image.bazel_bins[bin]
    return bins, bazel_bins


def get_bin_path(repo: Repository, bin: str, bazel_bins: dict[str, str]) -> Path:
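    """Return the path to the built binary, resolved from Bazel's output tree
    or from Cargo's target directory, depending on the build mode."""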
    if repo.rd.bazel:
        options = repo.rd.bazel_config()
        paths = bazel.output_paths(bazel_bins[bin], options)
        assert len(paths) == 1, f"{bazel_bins[bin]} output more than 1 file"
        return paths[0]
    else:
        cargo_profile = (
            "release"
            if repo.rd.profile == mzbuild.Profile.RELEASE
            else (
                "optimized" if repo.rd.profile == mzbuild.Profile.OPTIMIZED else "debug"
            )
        )
        return repo.rd.cargo_target_dir() / cargo_profile / bin


if __name__ == "__main__":
    main()