using System.Linq;
using ChatBot.Common.Constants;
using ChatBot.Models.Configuration;
using ChatBot.Models.Dto;
using ChatBot.Services;
using ChatBot.Services.Interfaces;
using ChatBot.Tests.TestUtilities;
using FluentAssertions;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
using Moq;
using OllamaSharp.Models.Chat;

namespace ChatBot.Tests.Services;

/// <summary>
/// Unit tests for <c>AIService</c>, covering the plain chat-completion path,
/// error fallback behavior, and the compression-aware completion path.
/// </summary>
/// <remarks>
/// NOTE(review): the generic type arguments in this file were stripped in the
/// original source (extraction/escaping artifact — e.g. <c>Mock&gt;</c>,
/// <c>It.IsAny()</c>). They have been reconstructed from usage and naming
/// conventions; confirm each against the actual project declarations.
/// </remarks>
public class AIServiceTests : UnitTestBase
{
    private readonly Mock<ILogger<AIService>> _loggerMock;
    // Mocked as a concrete class (constructed with ctor args below) — TODO confirm.
    private readonly Mock<ModelService> _modelServiceMock;
    // NOTE(review): presumably the OllamaSharp client interface — confirm exact name.
    private readonly Mock<IOllamaApiClient> _ollamaClientMock;
    private readonly Mock<SystemPromptService> _systemPromptServiceMock;
    private readonly Mock<ICompressionService> _compressionServiceMock;
    private readonly AISettings _aiSettings;
    private readonly AIService _aiService;

    public AIServiceTests()
    {
        _loggerMock = TestDataBuilder.Mocks.CreateLoggerMock<AIService>();

        var ollamaSettings = TestDataBuilder.Configurations.CreateOllamaSettings();
        var ollamaOptionsMock = TestDataBuilder.Mocks.CreateOptionsMock(ollamaSettings);

        // ModelService is mocked as a concrete class, so its constructor
        // dependencies must be supplied explicitly.
        _modelServiceMock = new Mock<ModelService>(
            Mock.Of<ILogger<ModelService>>(),
            ollamaOptionsMock.Object
        );

        _ollamaClientMock = TestDataBuilder.Mocks.CreateOllamaClientMock();

        _systemPromptServiceMock = new Mock<SystemPromptService>(
            Mock.Of<ILogger<SystemPromptService>>(),
            TestDataBuilder
                .Mocks.CreateOptionsMock(TestDataBuilder.Configurations.CreateAISettings())
                .Object
        );

        _compressionServiceMock = TestDataBuilder.Mocks.CreateCompressionServiceMock();

        _aiSettings = TestDataBuilder.Configurations.CreateAISettings();
        var optionsMock = TestDataBuilder.Mocks.CreateOptionsMock(_aiSettings);

        _aiService = new AIService(
            _loggerMock.Object,
            _modelServiceMock.Object,
            _ollamaClientMock.Object,
            optionsMock.Object,
            _systemPromptServiceMock.Object,
            _compressionServiceMock.Object
        );
    }

    [Fact]
    public async Task GenerateChatCompletionAsync_ShouldReturnResponse_WhenSuccessful()
    {
        // Arrange
        var messages = TestDataBuilder.ChatMessages.CreateMessageHistory(2);
        var expectedResponse = "Test AI response";
        var model = "llama3.2";

        _modelServiceMock.Setup(x => x.GetCurrentModel()).Returns(model);
        _systemPromptServiceMock.Setup(x => x.GetSystemPromptAsync()).ReturnsAsync("System prompt");

        // Stream a single assistant chunk whose content is the full expected response.
        _ollamaClientMock
            .Setup(x => x.ChatAsync(It.IsAny<ChatRequest>()))
            .Returns(
                TestDataBuilder.Mocks.CreateAsyncEnumerable(
                    new List<ChatResponseStream>
                    {
                        new ChatResponseStream
                        {
                            Message = new Message(ChatRole.Assistant, expectedResponse),
                        },
                    }
                )
            );

        // Act
        var result = await _aiService.GenerateChatCompletionAsync(messages);

        // Assert
        result.Should().Be(expectedResponse);
        _ollamaClientMock.Verify(
            x => x.ChatAsync(It.IsAny<ChatRequest>()),
            Times.Once
        );
    }

    // NOTE(review): the test name says "ShouldThrowException", but the service is
    // expected to swallow the client error and return the default error message.
    // Consider renaming to ..._ShouldReturnDefaultErrorMessage_WhenOllamaClientThrows.
    [Fact]
    public async Task GenerateChatCompletionAsync_ShouldThrowException_WhenOllamaClientThrows()
    {
        // Arrange
        var messages = TestDataBuilder.ChatMessages.CreateMessageHistory(2);
        var model = "llama3.2";

        _modelServiceMock.Setup(x => x.GetCurrentModel()).Returns(model);
        _systemPromptServiceMock.Setup(x => x.GetSystemPromptAsync()).ReturnsAsync("System prompt");
        _ollamaClientMock
            .Setup(x => x.ChatAsync(It.IsAny<ChatRequest>()))
            .Throws(new Exception("Ollama client error"));

        // Act
        var result = await _aiService.GenerateChatCompletionAsync(messages);

        // Assert — the exception is handled internally; callers get the fallback text.
        result.Should().Be(AIResponseConstants.DefaultErrorMessage);
    }

    [Fact]
    public async Task GenerateChatCompletionWithCompressionAsync_ShouldUseCompression_WhenEnabled()
    {
        // Arrange — CreateMessageHistory(10) apparently yields 20 messages
        // (10 user/assistant exchanges), matching ShouldCompress(20, 10) below.
        var messages = TestDataBuilder.ChatMessages.CreateMessageHistory(10);
        var expectedResponse = "Test AI response with compression";
        var model = "llama3.2";

        _modelServiceMock.Setup(x => x.GetCurrentModel()).Returns(model);
        _systemPromptServiceMock.Setup(x => x.GetSystemPromptAsync()).ReturnsAsync("System prompt");
        _compressionServiceMock.Setup(x => x.ShouldCompress(20, 10)).Returns(true);
        // NOTE(review): argument types reconstructed — confirm CompressHistoryAsync's
        // actual parameter types (history list, keep-count, and third parameter).
        _compressionServiceMock
            .Setup(x =>
                x.CompressHistoryAsync(
                    It.IsAny<List<Message>>(),
                    5,
                    It.IsAny<string>()
                )
            )
            .ReturnsAsync(messages.TakeLast(5).ToList());
        _ollamaClientMock
            .Setup(x => x.ChatAsync(It.IsAny<ChatRequest>()))
            .Returns(
                TestDataBuilder.Mocks.CreateAsyncEnumerable(
                    new List<ChatResponseStream>
                    {
                        new ChatResponseStream
                        {
                            Message = new Message(ChatRole.Assistant, expectedResponse),
                        },
                    }
                )
            );

        // Act
        var result = await _aiService.GenerateChatCompletionWithCompressionAsync(messages);

        // Assert
        result.Should().Be(expectedResponse);
        _compressionServiceMock.Verify(x => x.ShouldCompress(20, 10), Times.Once);
        _compressionServiceMock.Verify(
            x =>
                x.CompressHistoryAsync(
                    It.IsAny<List<Message>>(),
                    5,
                    It.IsAny<string>()
                ),
            Times.Once
        );
    }

    [Fact]
    public async Task GenerateChatCompletionWithCompressionAsync_ShouldNotUseCompression_WhenNotNeeded()
    {
        // Arrange — 3 exchanges → 6 messages, below the compression threshold of 10.
        var messages = TestDataBuilder.ChatMessages.CreateMessageHistory(3);
        var expectedResponse = "Test AI response without compression";
        var model = "llama3.2";

        _modelServiceMock.Setup(x => x.GetCurrentModel()).Returns(model);
        _systemPromptServiceMock.Setup(x => x.GetSystemPromptAsync()).ReturnsAsync("System prompt");
        _compressionServiceMock.Setup(x => x.ShouldCompress(6, 10)).Returns(false);
        _ollamaClientMock
            .Setup(x => x.ChatAsync(It.IsAny<ChatRequest>()))
            .Returns(
                TestDataBuilder.Mocks.CreateAsyncEnumerable(
                    new List<ChatResponseStream>
                    {
                        new ChatResponseStream
                        {
                            Message = new Message(ChatRole.Assistant, expectedResponse),
                        },
                    }
                )
            );

        // Act
        var result = await _aiService.GenerateChatCompletionWithCompressionAsync(messages);

        // Assert — compression must never be invoked for a short history.
        result.Should().Be(expectedResponse);
        _compressionServiceMock.Verify(x => x.ShouldCompress(6, 10), Times.Once);
        _compressionServiceMock.Verify(
            x =>
                x.CompressHistoryAsync(
                    It.IsAny<List<Message>>(),
                    It.IsAny<int>(),
                    It.IsAny<string>()
                ),
            Times.Never
        );
    }
}