config:

    spring.ai:
      model:
        chat: ollama
      ollama:
        base-url: http://xxx:11434
        chat:
          options:
            model: qwen3:32b
            temperature: 0

question: print a Python "hello world" code snippet

streamed output: print("hello world")

expected: ```\n\nprint("hello world")\n\n```

This worked correctly in 1.1.0-M3, but the issue is reproducible in the official release.
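
For reference, a minimal way to observe this (just a sketch, assuming the Ollama auto-configuration above and an injected `ChatClient.Builder`; the class and variable names are illustrative) is to stream the same prompt and print each chunk with its newlines escaped:

    import org.springframework.ai.chat.client.ChatClient;
    import reactor.core.publisher.Flux;

    public class StreamingNewlineRepro {

        private final ChatClient chatClient;

        public StreamingNewlineRepro(ChatClient.Builder builder) {
            this.chatClient = builder.build();
        }

        public void run() {
            Flux<String> chunks = this.chatClient.prompt()
                .user("print python Code snippet helloworld")
                .stream()
                .content();

            // Print each chunk with newlines escaped, so a "\n" dropped at a chunk
            // boundary is visible in the console output.
            chunks.doOnNext(chunk -> System.out.println("[" + chunk.replace("\n", "\\n") + "]"))
                .blockLast();
        }
    }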

Comment From: youcangetme

This is also happening in the OpenAI implementation, so I don't think it's at the model level. I thought I was going crazy because it doesn't seem to remove all of them, only when they are the first or last character in a chunk, maybe? At first I blamed RSocket, but after spending two days on that I can say it's somewhere in what the code below is calling. I can see the \n just fine in the chat memory, which is even more confusing. I am using v1.1.0.

    public Flux<ChatStreamEvent> streamChat(final ChatMessageRequest request) {

        final String conversationId = Objects.toString(request.conversationId(), "default");
        final String message = Objects.toString(request.message(), "");

        log.atDebug().log("ChatService.chat conversationId={} message={}", conversationId, message);

        // First meta event so UI can instantly show "thinking..."
        final Flux<ChatStreamEvent> meta = Flux
            .just(ChatStreamEvent.meta("Received message, contacting OpenAI..."));

        // Raw streamed responses (for metadata + rate limit)
        final Flux<ChatResponse> responseFlux = this.chatClient.prompt()
            .system(PREAMBLE)
            .advisors(advisor -> advisor.param(ChatMemory.CONVERSATION_ID, conversationId))
            .user(message)
            .stream()
            .chatResponse() // Flux<ChatResponse>
            .doOnNext(this::captureRateLimit);

        // Turn only text-bearing chunks into ChatStreamEvent.token(...)
        final Flux<ChatStreamEvent> content = responseFlux.flatMap(response -> {
            final Generation generation = response.getResult();
            if (generation == null) {
                // no generations in this chunk (e.g. tool/usage-only) -> skip
                return Flux.empty();
            }

            if (generation.getOutput() == null) {
                return Flux.empty();
            }

            final String text = generation.getOutput().getText();
            if (!StringUtils.hasText(text)) {
                return Flux.empty();
            }

            return Flux.just(ChatStreamEvent.token(text));
        });

        return meta.concatWith(content).concatWith(Flux.just(ChatStreamEvent.end())).onErrorResume(ex -> {
            log.warn("Error during chat stream", ex);
            return Flux.just(ChatStreamEvent.error("LLM error: " + ex.getMessage()));
        });
    }
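
A debugging step that might help pin this down (just a sketch against the pipeline above, reusing its field and variable names) is to log every raw chunk with its whitespace escaped right after `.chatResponse()`, before any mapping, to confirm whether the newlines are already missing when the chunks leave the client:

    // Debug sketch: same pipeline as above, with an extra doOnNext that logs each raw
    // chunk with newlines escaped so dropped whitespace at chunk edges shows up in logs.
    final Flux<ChatResponse> responseFlux = this.chatClient.prompt()
        .system(PREAMBLE)
        .advisors(advisor -> advisor.param(ChatMemory.CONVERSATION_ID, conversationId))
        .user(message)
        .stream()
        .chatResponse()
        .doOnNext(this::captureRateLimit)
        .doOnNext(response -> {
            final Generation generation = response.getResult();
            if (generation != null && generation.getOutput() != null) {
                final String text = generation.getOutput().getText();
                if (text != null) {
                    log.debug("raw chunk=[{}]", text.replace("\n", "\\n"));
                }
            }
        });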