|
| 1 | +import os |
| 2 | +import sys |
| 3 | +import pytest |
| 4 | +from unittest.mock import patch, MagicMock |
| 5 | + |
| 6 | +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) |
| 7 | + |
| 8 | +from tool.webpage_scraper import ( # noqa: E402 |
| 9 | + WebpageScraperTool, |
| 10 | + WebpageScraperToolInputSchema, |
| 11 | + WebpageScraperToolOutputSchema, |
| 12 | + WebpageScraperToolConfig, |
| 13 | +) |
| 14 | + |
| 15 | + |
@pytest.fixture
def mock_requests_get():
    """Yield a patched ``requests.get`` that serves a canned test page.

    The fake response carries a <title>, author/description meta tags, an
    og:site_name property, and a <main> section with a heading, a paragraph
    and a link — exactly the surface the scraper tests below assert on.
    """
    with patch("tool.webpage_scraper.requests.get") as patched_get:
        fake_response = MagicMock()
        fake_response.text = """
        <html>
        <head>
            <title>Test Page</title>
            <meta name="author" content="Test Author">
            <meta name="description" content="Test Description">
            <meta property="og:site_name" content="Test Site">
        </head>
        <body>
            <main>
                <h1>Test Heading</h1>
                <p>Test paragraph with <a href="https://example.com">link</a>.</p>
            </main>
        </body>
        </html>
        """
        # The scraper may read either .text or raw .content; keep them in sync.
        fake_response.content = fake_response.text.encode("utf-8")
        fake_response.status_code = 200
        fake_response.raise_for_status = MagicMock()

        patched_get.return_value = fake_response
        yield patched_get
| 44 | + |
| 45 | + |
def test_webpage_scraper_tool_basic(mock_requests_get):
    """Happy path: body text is converted and every metadata field is filled."""
    tool = WebpageScraperTool(WebpageScraperToolConfig())
    params = WebpageScraperToolInputSchema(url="https://example.com")

    output = tool.run(params)

    assert isinstance(output, WebpageScraperToolOutputSchema)
    # Body text from <main> must survive the HTML-to-markdown conversion.
    for fragment in ("Test Heading", "Test paragraph", "link"):
        assert fragment in output.content
    # Metadata comes from <title>, the meta tags, and the URL's host.
    assert output.metadata.title == "Test Page"
    assert output.metadata.author == "Test Author"
    assert output.metadata.description == "Test Description"
    assert output.metadata.site_name == "Test Site"
    assert output.metadata.domain == "example.com"
    assert output.error is None
| 65 | + |
| 66 | + |
def test_webpage_scraper_tool_without_links(mock_requests_get):
    """With include_links=False the anchor text survives but its URL does not."""
    tool = WebpageScraperTool(WebpageScraperToolConfig())
    params = WebpageScraperToolInputSchema(url="https://example.com", include_links=False)

    output = tool.run(params)

    assert isinstance(output, WebpageScraperToolOutputSchema)
    assert "Test paragraph with link" in output.content
    # Link URL should not be included
    assert "https://example.com" not in output.content
| 79 | + |
| 80 | + |
def test_webpage_scraper_tool_http_error(mock_requests_get):
    """A failing raise_for_status surfaces as an error result, not a crash."""
    # Make the mocked response object raise when the tool checks the status.
    failing = mock_requests_get.return_value
    failing.raise_for_status.side_effect = Exception("404 Client Error")

    tool = WebpageScraperTool(WebpageScraperToolConfig())
    output = tool.run(WebpageScraperToolInputSchema(url="https://example.com/not-found"))

    assert isinstance(output, WebpageScraperToolOutputSchema)
    assert output.content == ""  # Content should be empty
    assert output.metadata.title == "Error retrieving page"
    assert output.metadata.domain == "example.com"
    assert "404 Client Error" in output.error
| 98 | + |
| 99 | + |
def test_webpage_scraper_tool_content_too_large(mock_requests_get):
    """Responses larger than max_content_length are rejected with an error."""
    limit = 1_000_000
    # One byte over the configured cap.
    mock_requests_get.return_value.content = b"a" * (limit + 1)

    tool = WebpageScraperTool(WebpageScraperToolConfig(max_content_length=limit))
    output = tool.run(WebpageScraperToolInputSchema(url="https://example.com/large-page"))

    assert isinstance(output, WebpageScraperToolOutputSchema)
    assert "exceeds maximum" in output.error
| 115 | + |
| 116 | + |
def test_webpage_scraper_tool_extract_metadata():
    """_extract_metadata reads <meta> tags, the document title, and the URL host."""
    tool = WebpageScraperTool(WebpageScraperToolConfig())

    def make_meta(content):
        # A stand-in <meta> tag whose .get(...) yields the content attribute.
        tag = MagicMock()
        tag.get.return_value = content
        return tag

    # The (attribute, value) lookups _extract_metadata is expected to perform,
    # mapped to the mock tag each should resolve to.
    meta_tags = {
        ("name", "author"): make_meta("Author Name"),
        ("name", "description"): make_meta("Page Description"),
        ("property", "og:site_name"): make_meta("Site Name"),
    }

    def fake_find(tag, attrs=None):
        if tag == "meta" and attrs:
            for (attr_key, attr_value), meta_mock in meta_tags.items():
                if attrs == {attr_key: attr_value}:
                    return meta_mock
        return None

    soup = MagicMock()
    soup.find.side_effect = fake_find

    doc = MagicMock()
    doc.title.return_value = "Page Title"

    # Call the method directly
    metadata = tool._extract_metadata(soup, doc, "https://example.org/page")

    assert metadata.title == "Page Title"
    assert metadata.author == "Author Name"
    assert metadata.description == "Page Description"
    assert metadata.site_name == "Site Name"
    assert metadata.domain == "example.org"
| 158 | + |
| 159 | + |
def test_webpage_scraper_tool_clean_markdown():
    """_clean_markdown collapses blank-line runs, strips trailing spaces,
    and guarantees the result ends with a single newline."""
    # Initialize the tool
    scraper_tool = WebpageScraperTool(WebpageScraperToolConfig())

    # Input markdown with excess whitespace: runs of blank lines between
    # sections and trailing spaces after the paragraph text.
    # NOTE(review): the exact trailing whitespace inside this literal is
    # load-bearing for the "spaces \n" assertion below — confirm it is
    # preserved if this file is ever reformatted.
    dirty_markdown = """
    # Title



    Paragraph with trailing spaces   

    * List item 1
    * List item 2


    """

    # Clean the markdown
    cleaned = scraper_tool._clean_markdown(dirty_markdown)

    # Assertions
    assert cleaned.count("\n\n\n") == 0  # No triple newlines
    assert "spaces \n" not in cleaned  # No trailing spaces
    assert cleaned.endswith("\n")  # Ends with newline
| 185 | + |
| 186 | + |
if __name__ == "__main__":
    # Propagate pytest's exit status: a bare pytest.main(...) call discards
    # the returned code, so the script would exit 0 even when tests fail.
    sys.exit(pytest.main([__file__]))
0 commit comments