ChatBot/ChatBot.Tests/Services/AIServiceTests.cs

using ChatBot.Common.Constants;
using ChatBot.Models.Configuration;
using ChatBot.Models.Dto;
using ChatBot.Services;
using ChatBot.Services.Interfaces;
using ChatBot.Tests.TestUtilities;
using FluentAssertions;
using Microsoft.Extensions.Logging;
using Moq;
using OllamaSharp.Models.Chat;

namespace ChatBot.Tests.Services;
public class AIServiceTests : UnitTestBase
{
    private readonly Mock<ILogger<AIService>> _loggerMock;
    private readonly Mock<ModelService> _modelServiceMock;
    private readonly Mock<IOllamaClient> _ollamaClientMock;
    private readonly Mock<SystemPromptService> _systemPromptServiceMock;
    private readonly Mock<IHistoryCompressionService> _compressionServiceMock;
    private readonly AISettings _aiSettings;
    private readonly AIService _aiService;

    public AIServiceTests()
    {
        _loggerMock = TestDataBuilder.Mocks.CreateLoggerMock<AIService>();
        var ollamaSettings = TestDataBuilder.Configurations.CreateOllamaSettings();
        var ollamaOptionsMock = TestDataBuilder.Mocks.CreateOptionsMock(ollamaSettings);
        _modelServiceMock = new Mock<ModelService>(
            Mock.Of<ILogger<ModelService>>(),
            ollamaOptionsMock.Object
        );
        _ollamaClientMock = TestDataBuilder.Mocks.CreateOllamaClientMock();
        _systemPromptServiceMock = new Mock<SystemPromptService>(
            Mock.Of<ILogger<SystemPromptService>>(),
            TestDataBuilder
                .Mocks.CreateOptionsMock(TestDataBuilder.Configurations.CreateAISettings())
                .Object
        );
        _compressionServiceMock = TestDataBuilder.Mocks.CreateCompressionServiceMock();
        _aiSettings = TestDataBuilder.Configurations.CreateAISettings();
        var optionsMock = TestDataBuilder.Mocks.CreateOptionsMock(_aiSettings);
        _aiService = new AIService(
            _loggerMock.Object,
            _modelServiceMock.Object,
            _ollamaClientMock.Object,
            optionsMock.Object,
            _systemPromptServiceMock.Object,
            _compressionServiceMock.Object
        );
    }
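
    // Note: ModelService and SystemPromptService are mocked as concrete classes.
    // Moq can only override members declared virtual, so GetCurrentModel() and
    // GetSystemPromptAsync() are presumably virtual for the Setup(...) calls in
    // the tests below to take effect.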

    [Fact]
    public async Task GenerateChatCompletionAsync_ShouldReturnResponse_WhenSuccessful()
    {
        // Arrange
        var messages = TestDataBuilder.ChatMessages.CreateMessageHistory(2);
        var expectedResponse = "Test AI response";
        var model = "llama3.2";
        _modelServiceMock.Setup(x => x.GetCurrentModel()).Returns(model);
        _systemPromptServiceMock.Setup(x => x.GetSystemPromptAsync()).ReturnsAsync("System prompt");
        _ollamaClientMock
            .Setup(x => x.ChatAsync(It.IsAny<OllamaSharp.Models.Chat.ChatRequest>()))
            .Returns(
                TestDataBuilder.Mocks.CreateAsyncEnumerable(
                    new List<OllamaSharp.Models.Chat.ChatResponseStream>
                    {
                        new OllamaSharp.Models.Chat.ChatResponseStream
                        {
                            Message = new Message(ChatRole.Assistant, expectedResponse),
                        },
                    }
                )
            );

        // Act
        var result = await _aiService.GenerateChatCompletionAsync(messages);

        // Assert
        // The stub streams a single chunk, so the assembled result equals its content.
        result.Should().Be(expectedResponse);
        _ollamaClientMock.Verify(
            x => x.ChatAsync(It.IsAny<OllamaSharp.Models.Chat.ChatRequest>()),
            Times.Once
        );
    }

    [Fact]
    public async Task GenerateChatCompletionAsync_ShouldReturnDefaultErrorMessage_WhenOllamaClientThrows()
    {
        // Arrange
        var messages = TestDataBuilder.ChatMessages.CreateMessageHistory(2);
        var model = "llama3.2";
        _modelServiceMock.Setup(x => x.GetCurrentModel()).Returns(model);
        _systemPromptServiceMock.Setup(x => x.GetSystemPromptAsync()).ReturnsAsync("System prompt");
        _ollamaClientMock
            .Setup(x => x.ChatAsync(It.IsAny<OllamaSharp.Models.Chat.ChatRequest>()))
            .Throws(new Exception("Ollama client error"));

        // Act
        var result = await _aiService.GenerateChatCompletionAsync(messages);

        // Assert
        // The service swallows the exception and falls back to the default error message.
        result.Should().Be(AIResponseConstants.DefaultErrorMessage);
    }

    [Fact]
    public async Task GenerateChatCompletionWithCompressionAsync_ShouldUseCompression_WhenEnabled()
    {
        // Arrange
        // CreateMessageHistory(10) presumably yields 10 user/assistant exchanges,
        // i.e. 20 messages, which is why ShouldCompress is expected with (20, 10).
        var messages = TestDataBuilder.ChatMessages.CreateMessageHistory(10);
        var expectedResponse = "Test AI response with compression";
        var model = "llama3.2";
        _modelServiceMock.Setup(x => x.GetCurrentModel()).Returns(model);
        _systemPromptServiceMock.Setup(x => x.GetSystemPromptAsync()).ReturnsAsync("System prompt");
        _compressionServiceMock.Setup(x => x.ShouldCompress(20, 10)).Returns(true);
        _compressionServiceMock
            .Setup(x =>
                x.CompressHistoryAsync(
                    It.IsAny<List<ChatMessage>>(),
                    5,
                    It.IsAny<CancellationToken>()
                )
            )
            .ReturnsAsync(messages.TakeLast(5).ToList());
        _ollamaClientMock
            .Setup(x => x.ChatAsync(It.IsAny<OllamaSharp.Models.Chat.ChatRequest>()))
            .Returns(
                TestDataBuilder.Mocks.CreateAsyncEnumerable(
                    new List<OllamaSharp.Models.Chat.ChatResponseStream>
                    {
                        new OllamaSharp.Models.Chat.ChatResponseStream
                        {
                            Message = new Message(ChatRole.Assistant, expectedResponse),
                        },
                    }
                )
            );

        // Act
        var result = await _aiService.GenerateChatCompletionWithCompressionAsync(messages);

        // Assert
        result.Should().Be(expectedResponse);
        _compressionServiceMock.Verify(x => x.ShouldCompress(20, 10), Times.Once);
        _compressionServiceMock.Verify(
            x =>
                x.CompressHistoryAsync(
                    It.IsAny<List<ChatMessage>>(),
                    5,
                    It.IsAny<CancellationToken>()
                ),
            Times.Once
        );
    }
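
    // From the two compression tests, GenerateChatCompletionWithCompressionAsync
    // apparently asks IHistoryCompressionService.ShouldCompress(messageCount, threshold)
    // first and, only when it returns true, swaps the history for the result of
    // CompressHistoryAsync(...) before calling the Ollama client.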

    [Fact]
    public async Task GenerateChatCompletionWithCompressionAsync_ShouldNotUseCompression_WhenNotNeeded()
    {
        // Arrange
        var messages = TestDataBuilder.ChatMessages.CreateMessageHistory(3);
        var expectedResponse = "Test AI response without compression";
        var model = "llama3.2";
        _modelServiceMock.Setup(x => x.GetCurrentModel()).Returns(model);
        _systemPromptServiceMock.Setup(x => x.GetSystemPromptAsync()).ReturnsAsync("System prompt");
        _compressionServiceMock.Setup(x => x.ShouldCompress(6, 10)).Returns(false);
        _ollamaClientMock
            .Setup(x => x.ChatAsync(It.IsAny<OllamaSharp.Models.Chat.ChatRequest>()))
            .Returns(
                TestDataBuilder.Mocks.CreateAsyncEnumerable(
                    new List<OllamaSharp.Models.Chat.ChatResponseStream>
                    {
                        new OllamaSharp.Models.Chat.ChatResponseStream
                        {
                            Message = new Message(ChatRole.Assistant, expectedResponse),
                        },
                    }
                )
            );

        // Act
        var result = await _aiService.GenerateChatCompletionWithCompressionAsync(messages);

        // Assert
        result.Should().Be(expectedResponse);
        _compressionServiceMock.Verify(x => x.ShouldCompress(6, 10), Times.Once);
        _compressionServiceMock.Verify(
            x =>
                x.CompressHistoryAsync(
                    It.IsAny<List<ChatMessage>>(),
                    It.IsAny<int>(),
                    It.IsAny<CancellationToken>()
                ),
            Times.Never
        );
    }

    [Fact]
    public async Task GenerateChatCompletionAsync_ShouldRetryOnHttpRequestException_AndEventuallySucceed()
    {
        // Arrange
        var messages = TestDataBuilder.ChatMessages.CreateMessageHistory(2);
        var model = "llama3.2";
        var expectedResponse = "Success after retry";
        _modelServiceMock.Setup(x => x.GetCurrentModel()).Returns(model);
        _systemPromptServiceMock.Setup(x => x.GetSystemPromptAsync()).ReturnsAsync("System prompt");
        // The first call throws a retryable 503; the second streams a response,
        // so exactly two attempts are expected.
        var callCount = 0;
        _ollamaClientMock
            .Setup(x => x.ChatAsync(It.IsAny<OllamaSharp.Models.Chat.ChatRequest>()))
            .Returns(() =>
            {
                callCount++;
                if (callCount == 1)
                {
                    var ex = new HttpRequestException("Service temporarily unavailable");
                    ex.Data["StatusCode"] = 503;
                    throw ex;
                }

                return TestDataBuilder.Mocks.CreateAsyncEnumerable(
                    new List<OllamaSharp.Models.Chat.ChatResponseStream>
                    {
                        new OllamaSharp.Models.Chat.ChatResponseStream
                        {
                            Message = new Message(ChatRole.Assistant, expectedResponse),
                        },
                    }
                );
            });

        // Act
        var result = await _aiService.GenerateChatCompletionAsync(messages);

        // Assert
        result.Should().Be(expectedResponse);
        _ollamaClientMock.Verify(
            x => x.ChatAsync(It.IsAny<OllamaSharp.Models.Chat.ChatRequest>()),
            Times.Exactly(2)
        );
    }

    [Fact]
    public async Task GenerateChatCompletionAsync_ShouldRetryOnHttpRequestException_AndEventuallyFail()
    {
        // Arrange
        var messages = TestDataBuilder.ChatMessages.CreateMessageHistory(2);
        var model = "llama3.2";
        _modelServiceMock.Setup(x => x.GetCurrentModel()).Returns(model);
        _systemPromptServiceMock.Setup(x => x.GetSystemPromptAsync()).ReturnsAsync("System prompt");
        var ex = new HttpRequestException("Service unavailable");
        ex.Data["StatusCode"] = 503;
        _ollamaClientMock
            .Setup(x => x.ChatAsync(It.IsAny<OllamaSharp.Models.Chat.ChatRequest>()))
            .Throws(ex);

        // Act
        var result = await _aiService.GenerateChatCompletionAsync(messages);

        // Assert
        result.Should().Be(AIResponseConstants.DefaultErrorMessage);
        _ollamaClientMock.Verify(
            x => x.ChatAsync(It.IsAny<OllamaSharp.Models.Chat.ChatRequest>()),
            Times.Exactly(3) // MaxRetryAttempts = 3
        );
    }

    [Fact]
    public async Task GenerateChatCompletionAsync_ShouldHandleTimeoutException()
    {
        // Arrange
        var messages = TestDataBuilder.ChatMessages.CreateMessageHistory(2);
        var model = "llama3.2";
        _modelServiceMock.Setup(x => x.GetCurrentModel()).Returns(model);
        _systemPromptServiceMock.Setup(x => x.GetSystemPromptAsync()).ReturnsAsync("System prompt");
        _ollamaClientMock
            .Setup(x => x.ChatAsync(It.IsAny<OllamaSharp.Models.Chat.ChatRequest>()))
            .Throws(new TimeoutException("Request timed out"));

        // Act
        var result = await _aiService.GenerateChatCompletionAsync(messages);

        // Assert
        result.Should().Be(AIResponseConstants.DefaultErrorMessage);
    }

    [Fact]
    public async Task GenerateChatCompletionAsync_ShouldRetryWithExponentialBackoff_WhenEnabled()
    {
        // Arrange
        var messages = TestDataBuilder.ChatMessages.CreateMessageHistory(2);
        var model = "llama3.2";
        // Create an AIService with exponential backoff enabled
        var aiSettings = new AISettings
        {
            MaxRetryAttempts = 3,
            RetryDelayMs = 100,
            EnableExponentialBackoff = true,
            MaxRetryDelayMs = 1000,
        };
        var optionsMock = TestDataBuilder.Mocks.CreateOptionsMock(aiSettings);
        var aiService = new AIService(
            _loggerMock.Object,
            _modelServiceMock.Object,
            _ollamaClientMock.Object,
            optionsMock.Object,
            _systemPromptServiceMock.Object,
            _compressionServiceMock.Object
        );
        _modelServiceMock.Setup(x => x.GetCurrentModel()).Returns(model);
        _systemPromptServiceMock.Setup(x => x.GetSystemPromptAsync()).ReturnsAsync("System prompt");
        var ex = new HttpRequestException("Service unavailable");
        ex.Data["StatusCode"] = 503;
        _ollamaClientMock
            .Setup(x => x.ChatAsync(It.IsAny<OllamaSharp.Models.Chat.ChatRequest>()))
            .Throws(ex);

        // Act
        var result = await aiService.GenerateChatCompletionAsync(messages);

        // Assert
        result.Should().Be(AIResponseConstants.DefaultErrorMessage);
        _ollamaClientMock.Verify(
            x => x.ChatAsync(It.IsAny<OllamaSharp.Models.Chat.ChatRequest>()),
            Times.Exactly(3)
        );
    }

    [Fact]
    public async Task GenerateChatCompletionAsync_ShouldRetryWithLinearBackoff_WhenExponentialDisabled()
    {
        // Arrange
        var messages = TestDataBuilder.ChatMessages.CreateMessageHistory(2);
        var model = "llama3.2";
        // Create an AIService with linear backoff
        var aiSettings = new AISettings
        {
            MaxRetryAttempts = 3,
            RetryDelayMs = 100,
            EnableExponentialBackoff = false,
            MaxRetryDelayMs = 1000,
        };
        var optionsMock = TestDataBuilder.Mocks.CreateOptionsMock(aiSettings);
        var aiService = new AIService(
            _loggerMock.Object,
            _modelServiceMock.Object,
            _ollamaClientMock.Object,
            optionsMock.Object,
            _systemPromptServiceMock.Object,
            _compressionServiceMock.Object
        );
        _modelServiceMock.Setup(x => x.GetCurrentModel()).Returns(model);
        _systemPromptServiceMock.Setup(x => x.GetSystemPromptAsync()).ReturnsAsync("System prompt");
        var ex = new HttpRequestException("Service unavailable");
        ex.Data["StatusCode"] = 503;
        _ollamaClientMock
            .Setup(x => x.ChatAsync(It.IsAny<OllamaSharp.Models.Chat.ChatRequest>()))
            .Throws(ex);

        // Act
        var result = await aiService.GenerateChatCompletionAsync(messages);

        // Assert
        result.Should().Be(AIResponseConstants.DefaultErrorMessage);
        _ollamaClientMock.Verify(
            x => x.ChatAsync(It.IsAny<OllamaSharp.Models.Chat.ChatRequest>()),
            Times.Exactly(3)
        );
    }
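
    // Note: the two backoff tests above can only observe the number of attempts;
    // asserting the actual delay schedule (exponential vs. linear) would require
    // making the delay observable, e.g. by injecting a fake delay/clock into AIService.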

    [Theory]
    [InlineData(502)] // Bad Gateway
    [InlineData(503)] // Service Unavailable
    [InlineData(504)] // Gateway Timeout
    [InlineData(429)] // Too Many Requests
    [InlineData(500)] // Internal Server Error
    public async Task GenerateChatCompletionAsync_ShouldApplyCorrectRetryDelay_ForStatusCode(
        int statusCode
    )
    {
        // Arrange
        var messages = TestDataBuilder.ChatMessages.CreateMessageHistory(2);
        var model = "llama3.2";
        _modelServiceMock.Setup(x => x.GetCurrentModel()).Returns(model);
        _systemPromptServiceMock.Setup(x => x.GetSystemPromptAsync()).ReturnsAsync("System prompt");
        var ex = new HttpRequestException($"HTTP {statusCode}");
        ex.Data["StatusCode"] = statusCode;
        _ollamaClientMock
            .Setup(x => x.ChatAsync(It.IsAny<OllamaSharp.Models.Chat.ChatRequest>()))
            .Throws(ex);

        // Act
        var result = await _aiService.GenerateChatCompletionAsync(messages);

        // Assert
        result.Should().Be(AIResponseConstants.DefaultErrorMessage);
        _ollamaClientMock.Verify(
            x => x.ChatAsync(It.IsAny<OllamaSharp.Models.Chat.ChatRequest>()),
            Times.Exactly(3)
        );
    }
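
    // All status codes exercised above are treated as retryable (three attempts each).
    // A non-retryable code (e.g. 400) would presumably stop after a single attempt,
    // but that case is not covered by these tests.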

    [Fact]
    public async Task GenerateChatCompletionAsync_ShouldHandleCancellationToken()
    {
        // Arrange
        var messages = TestDataBuilder.ChatMessages.CreateMessageHistory(2);
        var model = "llama3.2";
        using var cts = new CancellationTokenSource();
        await cts.CancelAsync(); // Cancel immediately
        _modelServiceMock.Setup(x => x.GetCurrentModel()).Returns(model);
        _systemPromptServiceMock.Setup(x => x.GetSystemPromptAsync()).ReturnsAsync("System prompt");

        // Act
        var result = await _aiService.GenerateChatCompletionAsync(messages, cts.Token);

        // Assert
        result.Should().Be(string.Empty); // When cancelled immediately, the service returns an empty string
    }

    [Fact]
    public async Task GenerateChatCompletionAsync_ShouldLogRetryAttempts()
    {
        // Arrange
        var messages = TestDataBuilder.ChatMessages.CreateMessageHistory(2);
        var model = "llama3.2";
        _modelServiceMock.Setup(x => x.GetCurrentModel()).Returns(model);
        _systemPromptServiceMock.Setup(x => x.GetSystemPromptAsync()).ReturnsAsync("System prompt");
        var ex = new HttpRequestException("Service unavailable");
        ex.Data["StatusCode"] = 503;
        _ollamaClientMock
            .Setup(x => x.ChatAsync(It.IsAny<OllamaSharp.Models.Chat.ChatRequest>()))
            .Throws(ex);

        // Act
        var result = await _aiService.GenerateChatCompletionAsync(messages);

        // Assert
        result.Should().Be(AIResponseConstants.DefaultErrorMessage);
        // Verify that retry warnings were logged
        _loggerMock.Verify(
            x =>
                x.Log(
                    LogLevel.Warning,
                    It.IsAny<EventId>(),
                    It.Is<It.IsAnyType>((v, t) => v.ToString()!.Contains("HTTP request failed")),
                    It.IsAny<Exception>(),
                    It.IsAny<Func<It.IsAnyType, Exception?, string>>()
                ),
            Times.AtLeast(2) // At least two retry attempts
        );
    }
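
    // Note on the logging assertions: Moq cannot intercept the static LogWarning/LogError
    // extension methods, so the tests above and below verify the underlying ILogger.Log
    // call that those extensions forward to, matching the formatted state via It.IsAnyType.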

    [Fact]
    public async Task GenerateChatCompletionAsync_ShouldLogFinalError_WhenAllRetriesExhausted()
    {
        // Arrange
        var messages = TestDataBuilder.ChatMessages.CreateMessageHistory(2);
        var model = "llama3.2";
        _modelServiceMock.Setup(x => x.GetCurrentModel()).Returns(model);
        _systemPromptServiceMock.Setup(x => x.GetSystemPromptAsync()).ReturnsAsync("System prompt");
        var ex = new Exception("Final error");
        _ollamaClientMock
            .Setup(x => x.ChatAsync(It.IsAny<OllamaSharp.Models.Chat.ChatRequest>()))
            .Throws(ex);

        // Act
        var result = await _aiService.GenerateChatCompletionAsync(messages);

        // Assert
        result.Should().Be(AIResponseConstants.DefaultErrorMessage);
        // Verify that the final error was logged
        _loggerMock.Verify(
            x =>
                x.Log(
                    LogLevel.Error,
                    It.IsAny<EventId>(),
                    It.Is<It.IsAnyType>(
                        (v, t) => v.ToString()!.Contains("Failed to generate chat completion")
                    ),
                    It.IsAny<Exception>(),
                    It.IsAny<Func<It.IsAnyType, Exception?, string>>()
                ),
            Times.AtLeast(3) // One for each attempt
        );
    }
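
    // Possible consolidation (a sketch, not part of the original tests): the
    // streamed-response setup and the status-coded HttpRequestException recur across
    // most tests above; helpers like the following could reduce the duplication,
    // assuming the same TestDataBuilder utilities and OllamaSharp types used above.

    // Configures the Ollama client mock to stream back a single assistant message.
    private void SetupStreamedResponse(string content)
    {
        _ollamaClientMock
            .Setup(x => x.ChatAsync(It.IsAny<OllamaSharp.Models.Chat.ChatRequest>()))
            .Returns(
                TestDataBuilder.Mocks.CreateAsyncEnumerable(
                    new List<OllamaSharp.Models.Chat.ChatResponseStream>
                    {
                        new OllamaSharp.Models.Chat.ChatResponseStream
                        {
                            Message = new Message(ChatRole.Assistant, content),
                        },
                    }
                )
            );
    }

    // Builds an HttpRequestException carrying the status code that the retry logic
    // reads from Exception.Data, mirroring the setup used in the retry tests above.
    private static HttpRequestException CreateHttpRequestException(int statusCode, string message)
    {
        var ex = new HttpRequestException(message);
        ex.Data["StatusCode"] = statusCode;
        return ex;
    }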
}