add more tests
Some checks failed
SonarQube / Build and analyze (push) Failing after 2m56s
Unit Tests / Run Tests (push) Failing after 2m28s

Leonid Pershin
2025-10-20 07:02:12 +03:00
parent af9773e7d6
commit 1647fe19d3
12 changed files with 3714 additions and 21 deletions


@@ -204,4 +204,317 @@ public class AIServiceTests : UnitTestBase
            Times.Never
        );
    }

    [Fact]
    public async Task GenerateChatCompletionAsync_ShouldRetryOnHttpRequestException_AndEventuallySucceed()
    {
        // Arrange
        var messages = TestDataBuilder.ChatMessages.CreateMessageHistory(2);
        var model = "llama3.2";
        var expectedResponse = "Success after retry";
        _modelServiceMock.Setup(x => x.GetCurrentModel()).Returns(model);
        _systemPromptServiceMock.Setup(x => x.GetSystemPromptAsync()).ReturnsAsync("System prompt");
        var callCount = 0;
        _ollamaClientMock
            .Setup(x => x.ChatAsync(It.IsAny<OllamaSharp.Models.Chat.ChatRequest>()))
            .Returns(() =>
            {
                callCount++;
                if (callCount == 1)
                {
                    var ex = new HttpRequestException("Service temporarily unavailable");
                    ex.Data["StatusCode"] = 503;
                    throw ex;
                }
                else
                {
                    return TestDataBuilder.Mocks.CreateAsyncEnumerable(
                        new List<OllamaSharp.Models.Chat.ChatResponseStream>
                        {
                            new OllamaSharp.Models.Chat.ChatResponseStream
                            {
                                Message = new Message(ChatRole.Assistant, expectedResponse),
                            },
                        }
                    );
                }
            });

        // Act
        var result = await _aiService.GenerateChatCompletionAsync(messages);

        // Assert
        result.Should().Be(expectedResponse);
        _ollamaClientMock.Verify(
            x => x.ChatAsync(It.IsAny<OllamaSharp.Models.Chat.ChatRequest>()),
            Times.Exactly(2)
        );
    }
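
    // The setup above relies on TestDataBuilder.Mocks.CreateAsyncEnumerable to expose a list of
    // ChatResponseStream items as the IAsyncEnumerable that ChatAsync returns. That builder is not
    // part of this diff; a minimal sketch of such a helper, assuming no per-item delay or
    // cancellation handling is needed in tests, could look like this (illustrative only):
    private static async IAsyncEnumerable<T> CreateAsyncEnumerableSketch<T>(IEnumerable<T> items)
    {
        foreach (var item in items)
        {
            yield return item; // emit each prepared chunk immediately
        }

        await Task.CompletedTask; // satisfy the async modifier without introducing real delays
    }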

    [Fact]
    public async Task GenerateChatCompletionAsync_ShouldRetryOnHttpRequestException_AndEventuallyFail()
    {
        // Arrange
        var messages = TestDataBuilder.ChatMessages.CreateMessageHistory(2);
        var model = "llama3.2";
        _modelServiceMock.Setup(x => x.GetCurrentModel()).Returns(model);
        _systemPromptServiceMock.Setup(x => x.GetSystemPromptAsync()).ReturnsAsync("System prompt");
        var ex = new HttpRequestException("Service unavailable");
        ex.Data["StatusCode"] = 503;
        _ollamaClientMock
            .Setup(x => x.ChatAsync(It.IsAny<OllamaSharp.Models.Chat.ChatRequest>()))
            .Throws(ex);

        // Act
        var result = await _aiService.GenerateChatCompletionAsync(messages);

        // Assert
        result.Should().Be(AIResponseConstants.DefaultErrorMessage);
        _ollamaClientMock.Verify(
            x => x.ChatAsync(It.IsAny<OllamaSharp.Models.Chat.ChatRequest>()),
            Times.Exactly(3) // MaxRetryAttempts = 3
        );
    }
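
    // The two retry tests above encode the contract these tests assume for the production code:
    // failed calls are re-attempted up to AISettings.MaxRetryAttempts times, HTTP failures are
    // logged as warnings and other failures as errors, and once the attempts are exhausted the
    // caller gets AIResponseConstants.DefaultErrorMessage back instead of an exception. A minimal
    // sketch of such a loop follows; method and parameter names are illustrative assumptions, not
    // the actual AIService implementation.
    private static async Task<string> RetryLoopSketchAsync(
        AISettings settings,
        ILogger logger,
        Func<Task<string>> attempt
    )
    {
        for (var i = 1; i <= settings.MaxRetryAttempts; i++)
        {
            try
            {
                return await attempt(); // a successful attempt short-circuits the loop
            }
            catch (HttpRequestException ex)
            {
                // Transient HTTP failure: warn and retry until the attempts run out
                logger.LogWarning(ex, "HTTP request failed on attempt {Attempt}", i);
            }
            catch (Exception ex)
            {
                // Any other failure is logged per attempt and also retried
                logger.LogError(ex, "Failed to generate chat completion on attempt {Attempt}", i);
            }

            if (i < settings.MaxRetryAttempts)
            {
                await Task.Delay(settings.RetryDelayMs); // backoff; see the delay sketches below
            }
        }

        return AIResponseConstants.DefaultErrorMessage;
    }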

    [Fact]
    public async Task GenerateChatCompletionAsync_ShouldHandleTimeoutException()
    {
        // Arrange
        var messages = TestDataBuilder.ChatMessages.CreateMessageHistory(2);
        var model = "llama3.2";
        _modelServiceMock.Setup(x => x.GetCurrentModel()).Returns(model);
        _systemPromptServiceMock.Setup(x => x.GetSystemPromptAsync()).ReturnsAsync("System prompt");
        _ollamaClientMock
            .Setup(x => x.ChatAsync(It.IsAny<OllamaSharp.Models.Chat.ChatRequest>()))
            .Throws(new TimeoutException("Request timed out"));

        // Act
        var result = await _aiService.GenerateChatCompletionAsync(messages);

        // Assert
        result.Should().Be(AIResponseConstants.DefaultErrorMessage);
    }

    [Fact]
    public async Task GenerateChatCompletionAsync_ShouldRetryWithExponentialBackoff_WhenEnabled()
    {
        // Arrange
        var messages = TestDataBuilder.ChatMessages.CreateMessageHistory(2);
        var model = "llama3.2";
        // Create AIService with exponential backoff enabled
        var aiSettings = new AISettings
        {
            MaxRetryAttempts = 3,
            RetryDelayMs = 100,
            EnableExponentialBackoff = true,
            MaxRetryDelayMs = 1000,
        };
        var optionsMock = TestDataBuilder.Mocks.CreateOptionsMock(aiSettings);
        var aiService = new AIService(
            _loggerMock.Object,
            _modelServiceMock.Object,
            _ollamaClientMock.Object,
            optionsMock.Object,
            _systemPromptServiceMock.Object,
            _compressionServiceMock.Object
        );
        _modelServiceMock.Setup(x => x.GetCurrentModel()).Returns(model);
        _systemPromptServiceMock.Setup(x => x.GetSystemPromptAsync()).ReturnsAsync("System prompt");
        var ex = new HttpRequestException("Service unavailable");
        ex.Data["StatusCode"] = 503;
        _ollamaClientMock
            .Setup(x => x.ChatAsync(It.IsAny<OllamaSharp.Models.Chat.ChatRequest>()))
            .Throws(ex);

        // Act
        var result = await aiService.GenerateChatCompletionAsync(messages);

        // Assert
        result.Should().Be(AIResponseConstants.DefaultErrorMessage);
        _ollamaClientMock.Verify(
            x => x.ChatAsync(It.IsAny<OllamaSharp.Models.Chat.ChatRequest>()),
            Times.Exactly(3)
        );
    }

    [Fact]
    public async Task GenerateChatCompletionAsync_ShouldRetryWithLinearBackoff_WhenExponentialDisabled()
    {
        // Arrange
        var messages = TestDataBuilder.ChatMessages.CreateMessageHistory(2);
        var model = "llama3.2";
        // Create AIService with linear backoff
        var aiSettings = new AISettings
        {
            MaxRetryAttempts = 3,
            RetryDelayMs = 100,
            EnableExponentialBackoff = false,
            MaxRetryDelayMs = 1000,
        };
        var optionsMock = TestDataBuilder.Mocks.CreateOptionsMock(aiSettings);
        var aiService = new AIService(
            _loggerMock.Object,
            _modelServiceMock.Object,
            _ollamaClientMock.Object,
            optionsMock.Object,
            _systemPromptServiceMock.Object,
            _compressionServiceMock.Object
        );
        _modelServiceMock.Setup(x => x.GetCurrentModel()).Returns(model);
        _systemPromptServiceMock.Setup(x => x.GetSystemPromptAsync()).ReturnsAsync("System prompt");
        var ex = new HttpRequestException("Service unavailable");
        ex.Data["StatusCode"] = 503;
        _ollamaClientMock
            .Setup(x => x.ChatAsync(It.IsAny<OllamaSharp.Models.Chat.ChatRequest>()))
            .Throws(ex);

        // Act
        var result = await aiService.GenerateChatCompletionAsync(messages);

        // Assert
        result.Should().Be(AIResponseConstants.DefaultErrorMessage);
        _ollamaClientMock.Verify(
            x => x.ChatAsync(It.IsAny<OllamaSharp.Models.Chat.ChatRequest>()),
            Times.Exactly(3)
        );
    }
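
    // The two backoff tests can only observe the number of attempts, because the wait between them
    // is not visible through the mocked client; the AISettings values they pass document the
    // intended delay policy. A sketch of a delay calculation consistent with those settings,
    // assuming the production code doubles the base delay per attempt and caps it at
    // MaxRetryDelayMs when exponential backoff is enabled (an assumption, not the verified
    // implementation):
    private static int RetryDelaySketchMs(AISettings settings, int attempt)
    {
        if (!settings.EnableExponentialBackoff)
        {
            return settings.RetryDelayMs; // linear: the same base delay before every retry
        }

        // Exponential: 100 ms, 200 ms, 400 ms, ... capped at MaxRetryDelayMs (1000 ms in the test)
        var exponential = settings.RetryDelayMs * (int)Math.Pow(2, attempt - 1);
        return Math.Min(exponential, settings.MaxRetryDelayMs);
    }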

    [Theory]
    [InlineData(502, 2000)] // Bad Gateway
    [InlineData(503, 3000)] // Service Unavailable
    [InlineData(504, 5000)] // Gateway Timeout
    [InlineData(429, 5000)] // Too Many Requests
    [InlineData(500, 1000)] // Internal Server Error
    public async Task GenerateChatCompletionAsync_ShouldApplyCorrectRetryDelay_ForStatusCode(
        int statusCode,
        int expectedAdditionalDelay
    )
    {
        // Arrange
        var messages = TestDataBuilder.ChatMessages.CreateMessageHistory(2);
        var model = "llama3.2";
        _modelServiceMock.Setup(x => x.GetCurrentModel()).Returns(model);
        _systemPromptServiceMock.Setup(x => x.GetSystemPromptAsync()).ReturnsAsync("System prompt");
        var ex = new HttpRequestException($"HTTP {statusCode}");
        ex.Data["StatusCode"] = statusCode;
        _ollamaClientMock
            .Setup(x => x.ChatAsync(It.IsAny<OllamaSharp.Models.Chat.ChatRequest>()))
            .Throws(ex);

        // The per-status-code delay is not observable through the mocked client, so the expected
        // value only documents the intended policy; the discard keeps the theory parameter in use.
        _ = expectedAdditionalDelay;

        // Act
        var result = await _aiService.GenerateChatCompletionAsync(messages);

        // Assert
        result.Should().Be(AIResponseConstants.DefaultErrorMessage);
        _ollamaClientMock.Verify(
            x => x.ChatAsync(It.IsAny<OllamaSharp.Models.Chat.ChatRequest>()),
            Times.Exactly(3)
        );
    }
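
    // The InlineData rows above pair each retryable status code with an assumed additional delay;
    // since the delay itself is not asserted, the pairs mainly document the expected policy. Read
    // back as a lookup, the rows imply a mapping like the sketch below (inferred from the test
    // data, not taken from the production code):
    private static int StatusCodeExtraDelaySketchMs(int statusCode) =>
        statusCode switch
        {
            500 => 1000, // Internal Server Error: shortest extra wait
            502 => 2000, // Bad Gateway
            503 => 3000, // Service Unavailable
            504 => 5000, // Gateway Timeout
            429 => 5000, // Too Many Requests: back off the longest
            _ => 0,      // anything else adds no extra delay
        };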

    [Fact]
    public async Task GenerateChatCompletionAsync_ShouldHandleCancellationToken()
    {
        // Arrange
        var messages = TestDataBuilder.ChatMessages.CreateMessageHistory(2);
        var model = "llama3.2";
        var cts = new CancellationTokenSource();
        cts.Cancel(); // Cancel immediately
        _modelServiceMock.Setup(x => x.GetCurrentModel()).Returns(model);
        _systemPromptServiceMock.Setup(x => x.GetSystemPromptAsync()).ReturnsAsync("System prompt");

        // Act
        var result = await _aiService.GenerateChatCompletionAsync(messages, cts.Token);

        // Assert
        result.Should().Be(AIResponseConstants.DefaultErrorMessage);
    }
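
    // The cancellation test encodes another assumption: a token that is already cancelled does not
    // bubble up as an OperationCanceledException but is converted into the default error message.
    // In sketch form (hypothetical handling, mirroring the retry fallback above):
    private static async Task<string> CancellationSketchAsync(
        Func<CancellationToken, Task<string>> attempt,
        CancellationToken cancellationToken
    )
    {
        try
        {
            return await attempt(cancellationToken);
        }
        catch (OperationCanceledException)
        {
            // Cancellation is treated like any other failure and surfaced as the fallback text
            return AIResponseConstants.DefaultErrorMessage;
        }
    }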

    [Fact]
    public async Task GenerateChatCompletionAsync_ShouldLogRetryAttempts()
    {
        // Arrange
        var messages = TestDataBuilder.ChatMessages.CreateMessageHistory(2);
        var model = "llama3.2";
        _modelServiceMock.Setup(x => x.GetCurrentModel()).Returns(model);
        _systemPromptServiceMock.Setup(x => x.GetSystemPromptAsync()).ReturnsAsync("System prompt");
        var ex = new HttpRequestException("Service unavailable");
        ex.Data["StatusCode"] = 503;
        _ollamaClientMock
            .Setup(x => x.ChatAsync(It.IsAny<OllamaSharp.Models.Chat.ChatRequest>()))
            .Throws(ex);

        // Act
        var result = await _aiService.GenerateChatCompletionAsync(messages);

        // Assert
        result.Should().Be(AIResponseConstants.DefaultErrorMessage);
        // Verify that retry warnings were logged
        _loggerMock.Verify(
            x =>
                x.Log(
                    LogLevel.Warning,
                    It.IsAny<EventId>(),
                    It.Is<It.IsAnyType>((v, t) => v.ToString()!.Contains("HTTP request failed")),
                    It.IsAny<Exception>(),
                    It.IsAny<Func<It.IsAnyType, Exception?, string>>()
                ),
            Times.AtLeast(2) // At least 2 retry attempts
        );
    }

    [Fact]
    public async Task GenerateChatCompletionAsync_ShouldLogFinalError_WhenAllRetriesExhausted()
    {
        // Arrange
        var messages = TestDataBuilder.ChatMessages.CreateMessageHistory(2);
        var model = "llama3.2";
        _modelServiceMock.Setup(x => x.GetCurrentModel()).Returns(model);
        _systemPromptServiceMock.Setup(x => x.GetSystemPromptAsync()).ReturnsAsync("System prompt");
        var ex = new Exception("Final error");
        _ollamaClientMock
            .Setup(x => x.ChatAsync(It.IsAny<OllamaSharp.Models.Chat.ChatRequest>()))
            .Throws(ex);

        // Act
        var result = await _aiService.GenerateChatCompletionAsync(messages);

        // Assert
        result.Should().Be(AIResponseConstants.DefaultErrorMessage);
        // Verify that final error was logged
        _loggerMock.Verify(
            x =>
                x.Log(
                    LogLevel.Error,
                    It.IsAny<EventId>(),
                    It.Is<It.IsAnyType>(
                        (v, t) => v.ToString()!.Contains("Failed to generate chat completion")
                    ),
                    It.IsAny<Exception>(),
                    It.IsAny<Func<It.IsAnyType, Exception?, string>>()
                ),
            Times.AtLeast(3) // One for each attempt
        );
    }
}