Contents

Spring Boot 3 uses Micrometer Tracing as the tracing API (replacing Spring Cloud Sleuth). It bridges to either OpenTelemetry SDK or Brave (Zipkin's library), keeping your application code independent of the backend.

Add the following to your pom.xml (Maven) or build.gradle (Gradle). Spring Boot manages compatible versions automatically when you use the Spring Boot BOM.

<!-- Core tracing — Micrometer Tracing with OpenTelemetry bridge --> <dependency> <groupId>io.micrometer</groupId> <artifactId>micrometer-tracing-bridge-otel</artifactId> </dependency> <!-- OTLP exporter (for Grafana Tempo, Jaeger, any OTEL collector) --> <dependency> <groupId>io.opentelemetry</groupId> <artifactId>opentelemetry-exporter-otlp</artifactId> </dependency> <!-- OR: Zipkin exporter (Brave bridge) --> <!-- <dependency> <groupId>io.micrometer</groupId> <artifactId>micrometer-tracing-bridge-brave</artifactId> </dependency> <dependency> <groupId>io.zipkin.reporter2</groupId> <artifactId>zipkin-reporter-brave</artifactId> </dependency> --> <!-- Actuator needed for /actuator/health and trace sampling config --> <dependency> <groupId>org.springframework.boot</groupId> <artifactId>spring-boot-starter-actuator</artifactId> </dependency> # application.yml management: tracing: sampling: probability: 1.0 # 1.0 = 100% of requests traced (use 0.1 in high-traffic prod) zipkin: tracing: endpoint: http://zipkin:9411/api/v2/spans # for Brave/Zipkin bridge

Add the following dependencies and configuration to your project. Each element is explained in the inline comments.

<!-- Brave bridge + Zipkin reporter --> <dependency> <groupId>io.micrometer</groupId> <artifactId>micrometer-tracing-bridge-brave</artifactId> </dependency> <dependency> <groupId>io.zipkin.reporter2</groupId> <artifactId>zipkin-reporter-brave</artifactId> </dependency> management: tracing: sampling: probability: 1.0 zipkin: tracing: endpoint: http://localhost:9411/api/v2/spans # Run Zipkin locally for development docker run -d -p 9411:9411 openzipkin/zipkin # Open http://localhost:9411 to browse traces

The YAML below shows the complete configuration for this feature. Adjust the values to match your environment.

# application.yml — OTLP exporter to Grafana Tempo or any OTEL collector management: tracing: sampling: probability: 1.0 otlp: tracing: endpoint: http://tempo:4318/v1/traces # Grafana Tempo OTLP HTTP endpoint # Or gRPC: # endpoint: http://tempo:4317 # docker-compose.yml — Grafana stack for local tracing services: tempo: image: grafana/tempo:latest ports: - "3200:3200" # query - "4318:4318" # OTLP HTTP command: ["-config.file=/etc/tempo.yaml"] grafana: image: grafana/grafana:latest ports: - "3000:3000" environment: - GF_AUTH_ANONYMOUS_ENABLED=true OTLP (OpenTelemetry Protocol) is the vendor-neutral standard. Exporting via OTLP means you can switch between Jaeger, Tempo, Honeycomb, Datadog, or any OTEL-compatible backend by changing a single endpoint URL — no code changes.

With Micrometer Tracing on the classpath, Spring Boot auto-instruments all common I/O operations — no code changes needed:

// Downstream service — trace context is extracted automatically from request headers @RestController public class OrderController { @GetMapping("/orders/{id}") public Order getOrder(@PathVariable Long id) { // Micrometer creates a "GET /orders/{id}" server span automatically // traceId from the incoming "traceparent" header is used return orderService.findById(id); } } // Upstream service calling downstream — traceparent injected automatically @Service @RequiredArgsConstructor public class CheckoutService { private final WebClient webClient; public Mono<Inventory> checkInventory(Long productId) { return webClient.get() .uri("http://inventory-service/products/{id}/stock", productId) // "traceparent: 00-{traceId}-{spanId}-01" added automatically .retrieve() .bodyToMono(Inventory.class); } }

The classes below show the implementation. Key points are highlighted in the inline comments.

import io.micrometer.tracing.Tracer; import io.micrometer.tracing.Span; @Service @RequiredArgsConstructor public class OrderService { private final Tracer tracer; public Order processOrder(CreateOrderRequest req) { // Create a custom child span for a meaningful business operation Span span = tracer.nextSpan() .name("order.process") .tag("order.customer-id", req.customerId().toString()) .tag("order.item-count", String.valueOf(req.items().size())) .start(); try (Tracer.SpanInScope ws = tracer.withSpan(span)) { Order order = doProcessOrder(req); span.tag("order.id", order.id().toString()); span.event("order.confirmed"); return order; } catch (Exception ex) { span.error(ex); throw ex; } finally { span.end(); } } } // Simpler — @NewSpan and @ContinueSpan AOP annotations @Service public class InventoryService { @NewSpan("inventory.reserve") // creates a new child span automatically public boolean reserveStock(@SpanTag("product.id") Long productId, int qty) { return doReserve(productId, qty); } }

The classes below show the implementation. Key points are highlighted in the inline comments.

import io.micrometer.tracing.BaggageField; // Writing baggage at the entry point (e.g., API gateway or first service) @Component public class TenantBaggagePropagator implements Filter { private static final BaggageField TENANT_ID = BaggageField.create("tenant-id"); @Override public void doFilter(ServletRequest req, ServletResponse res, FilterChain chain) throws IOException, ServletException { String tenantId = ((HttpServletRequest) req).getHeader("X-Tenant-Id"); if (tenantId != null) { TENANT_ID.updateValue(tenantId); // stored in trace context, propagated downstream } chain.doFilter(req, res); } } // Reading baggage in any downstream service @Service public class TenantAwareService { private static final BaggageField TENANT_ID = BaggageField.create("tenant-id"); public String getCurrentTenant() { return TENANT_ID.getValue(); // extracted from incoming trace context headers } public List<Product> getProducts() { String tenant = getCurrentTenant(); return productRepo.findByTenant(tenant); } } Baggage is propagated in every outbound request header — keep values small (IDs, flags) and never put sensitive data (tokens, passwords) in baggage. Baggage adds overhead proportional to the number of services in the call chain.

The YAML and code below show the complete configuration for this feature. Adjust the values to match your environment.

# application.yml — enable MDC tracing fields in logs logging: pattern: level: "%5p [${spring.application.name:},%X{traceId:-},%X{spanId:-}]" # Logback automatically picks up traceId/spanId from MDC when Micrometer Tracing is present # Sample log output — every line includes traceId and spanId INFO [order-service,65f2c3a1e4b7d8f9,3a1e4b7d] Started OrderServiceApplication INFO [order-service,65f2c3a1e4b7d8f9,3a1e4b7d] Processing order for customer 42 INFO [order-service,65f2c3a1e4b7d8f9,8c4f2b1e] Reserving stock for product 101 INFO [order-service,65f2c3a1e4b7d8f9,8c4f2b1e] Stock reserved successfully # Grep all logs for a single request across all services using the traceId: # grep "65f2c3a1e4b7d8f9" /var/log/order-service.log /var/log/inventory-service.log // Add custom fields to MDC alongside the automatic traceId/spanId @Component public class UserContextLoggingFilter extends OncePerRequestFilter { @Override protected void doFilterInternal(HttpServletRequest req, HttpServletResponse res, FilterChain chain) throws ServletException, IOException { String userId = req.getHeader("X-User-Id"); try { if (userId != null) MDC.put("userId", userId); chain.doFilter(req, res); } finally { MDC.remove("userId"); } } } In Grafana Loki, log lines are automatically linked to traces when they share the same traceId. Clicking a trace in Grafana Tempo will show correlated log lines — giving you the full picture of what happened without switching between tools.