10 changes: 9 additions & 1 deletion backend/onyx/connectors/web/connector.py
@@ -36,7 +36,7 @@
from onyx.connectors.models import Document
from onyx.connectors.models import TextSection
from onyx.file_processing.extract_file_text import read_pdf_file
-from onyx.file_processing.html_utils import web_html_cleanup
+from onyx.file_processing.html_utils import remove_by_selector
+from onyx.file_processing.html_utils import web_html_cleanup
from onyx.utils.logger import setup_logger
from onyx.utils.sitemap import list_pages_for_site
from shared_configs.configs import MULTI_TENANT
@@ -438,13 +438,19 @@ def __init__(
        mintlify_cleanup: bool = True,  # Mostly ok to apply to other websites as well
        batch_size: int = INDEX_BATCH_SIZE,
        scroll_before_scraping: bool = False,
        remove_by_selector: list[str] | None = None,  # avoid a shared mutable [] default
        **kwargs: Any,
    ) -> None:
        self.mintlify_cleanup = mintlify_cleanup
        self.batch_size = batch_size
        self.recursive = False
        self.scroll_before_scraping = scroll_before_scraping
        self.remove_by_selector = remove_by_selector or []
        self.web_connector_type = web_connector_type

        # Guard against malformed config values (e.g. a bare string from JSON).
        if not isinstance(self.remove_by_selector, list):
            self.remove_by_selector = []

        if web_connector_type == WEB_CONNECTOR_VALID_SETTINGS.RECURSIVE.value:
            self.recursive = True
            self.to_visit_list = [_ensure_valid_url(base_url)]
@@ -571,6 +577,8 @@ def _do_scrape(

        content = page.content()
        soup = BeautifulSoup(content, "html.parser")

        # Strip excluded elements before link extraction and text cleanup.
        remove_by_selector(soup, self.remove_by_selector)

        if self.recursive:
            internal_links = get_internal_links(
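For orientation, a minimal sketch of the path these two hunks create: parse, strip excluded elements in place, then clean. Only names visible in this diff are used; the scrape_page wrapper itself is hypothetical, and web_html_cleanup is assumed to accept an already-parsed soup (the diff below shows it returning ParsedHTML).

from bs4 import BeautifulSoup

from onyx.file_processing.html_utils import remove_by_selector
from onyx.file_processing.html_utils import web_html_cleanup


def scrape_page(raw_html: str, selectors: list[str]) -> str:
    # Hypothetical condensation of the _do_scrape path touched above.
    soup = BeautifulSoup(raw_html, "html.parser")

    # Remove excluded elements *before* text extraction, so they can
    # never leak into the indexed document.
    remove_by_selector(soup, selectors)

    # Assumption: web_html_cleanup accepts a parsed soup and returns ParsedHTML.
    return web_html_cleanup(soup).cleaned_text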
18 changes: 18 additions & 0 deletions backend/onyx/file_processing/html_utils.py
@@ -219,3 +219,21 @@ def web_html_cleanup(
    cleaned_text = page_text.replace("\u200b", "")

    return ParsedHTML(title=title, cleaned_text=cleaned_text)


def remove_by_selector(soup: bs4.BeautifulSoup, selectors: list[str]) -> None:
    """Remove all elements matching the given CSS selectors, in place.

    A page can opt in to additional removals via
    <meta name="remove_by_selector" content="...">, whose content is
    treated as one extra selector entry.
    """
    meta_tag = soup.select_one("meta[name='remove_by_selector']")
    if meta_tag and meta_tag.has_attr("content"):
        page_selectors = [meta_tag["content"].strip()]
    else:
        page_selectors = []

    for selector in selectors + page_selectors:
        selector = selector.strip()
        if not selector:
            continue
        # soup.select() natively supports comma-separated selector groups
        # (e.g. "nav, footer"), so no manual splitting is needed -- and
        # naive splitting would break selectors like "div:is(.a, .b)".
        for element in soup.select(selector):
            element.decompose()
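A quick usage sketch of the new helper (standalone; the HTML and selector values are invented). It exercises both the caller-supplied selectors and the per-page meta opt-in:

import bs4

from onyx.file_processing.html_utils import remove_by_selector

html = """
<html>
  <head><meta name="remove_by_selector" content=".ad-banner"></head>
  <body>
    <nav>site nav</nav>
    <div class="ad-banner">sponsored</div>
    <main>useful content</main>
  </body>
</html>
"""

soup = bs4.BeautifulSoup(html, "html.parser")
# "nav, footer" is a CSS selector group; soup.select() matches both parts.
# The meta tag contributes ".ad-banner" on top of the caller's list.
remove_by_selector(soup, ["nav, footer"])
print(soup.get_text(strip=True))  # -> useful content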
9 changes: 9 additions & 0 deletions web/src/lib/connectors/connectors.tsx
@@ -177,6 +177,15 @@ export const connectorConfigs: Record<
name: "scroll_before_scraping",
optional: true,
},
{
type: "list",
query: "Remove by selector:",
label: "Remove by selector",
description:
"List of css selectors used to exclude html elements from scraping",
name: "remove_by_selector",
optional: true
},
],
overrideDefaultFreq: 60 * 60 * 24,
},
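For completeness, a hedged example of what this form field produces on the backend -- assuming, as with the other fields in this list, that the value lands in the connector's connector_specific_config and reaches WebConnector.__init__ by name (all values below are invented):

connector_specific_config = {
    "base_url": "https://docs.example.com",  # hypothetical site
    "web_connector_type": "recursive",
    "scroll_before_scraping": False,
    # Each list entry is one CSS selector; an entry may itself be a
    # comma-separated group, which soup.select() handles natively.
    "remove_by_selector": ["nav", "footer", ".sidebar, .cookie-banner"],
}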