Commit
Requesthandler bugfixes (#847)
* refactor: update scraping logic in state_transition.py

* chore: fix Plex watchlist validation; fix invalid response types in Jackett and Prowlarr

* chore: revert _get_indexers changes

---------

Co-authored-by: Gaisberg <None>
Gaisberg authored Nov 4, 2024
1 parent 0d31e41 commit 6d01407
Showing 5 changed files with 9 additions and 11 deletions.
2 changes: 1 addition & 1 deletion src/program/services/content/plex_watchlist.py
@@ -26,7 +26,7 @@ def validate(self):
             logger.error("Plex token is not set!")
             return False
         try:
-            self.api.validate()
+            self.api.validate_account()
         except Exception as e:
             logger.error(f"Unable to authenticate Plex account: {e}")
             return False
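For context, the renamed call is expected to raise on authentication failure rather than return a status, which is why the caller wraps it in try/except. A minimal sketch of that pattern, assuming (as the except block above implies) that validate_account() raises on bad credentials:

def safe_validate(api, logger) -> bool:
    # Minimal sketch: any exception from validate_account() is treated
    # as "account not valid" and logged, mirroring the diff above.
    try:
        api.validate_account()
    except Exception as e:
        logger.error(f"Unable to authenticate Plex account: {e}")
        return False
    return True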
2 changes: 1 addition & 1 deletion src/program/services/scrapers/jackett.py
@@ -243,7 +243,7 @@ def _fetch_results(self, url: str, params: Dict[str, str], indexer_title: str, search_type: str) -> List[Tuple[str, str]]:
         """Fetch results from the given indexer"""
         try:
             response = get(session=self.session, url=url, params=params, timeout=self.settings.timeout)
-            return self._parse_xml(response.data)
+            return self._parse_xml(response.response.text)
         except (HTTPError, ConnectionError, Timeout):
             logger.debug(f"Indexer failed to fetch results for {search_type}: {indexer_title}")
         except Exception as e:
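The switch from response.data to response.response.text hands _parse_xml the raw body instead of the generic parsed payload: Jackett answers Torznab queries with XML, which a JSON-oriented handler cannot decode. A standalone sketch of the XML step (_parse_xml itself is not part of this diff, so the helper below is a hypothetical stand-in):

import xml.etree.ElementTree as ET
from typing import List, Tuple

def parse_torznab(raw_xml: str) -> List[Tuple[str, str]]:
    # Hypothetical stand-in for _parse_xml: pull (title, link) pairs
    # out of a raw Torznab <rss><channel><item> feed.
    root = ET.fromstring(raw_xml)
    results = []
    for item in root.iter("item"):
        title = item.findtext("title", default="")
        link = item.findtext("link", default="")
        if title and link:
            results.append((title, link))
    return results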
4 changes: 2 additions & 2 deletions src/program/services/scrapers/prowlarr.py
@@ -227,14 +227,14 @@ def _get_indexer_from_json(self, json_content: str) -> list[ProwlarrIndexer]:
         indexer_list = []
         for indexer in json.loads(json_content):
             indexer_list.append(ProwlarrIndexer(title=indexer["name"], id=str(indexer["id"]), link=indexer["infoLink"], type=indexer["protocol"], language=indexer["language"], movie_search_capabilities=(s[0] for s in indexer["capabilities"]["movieSearchParams"]) if len([s for s in indexer["capabilities"]["categories"] if s["name"] == "Movies"]) > 0 else None, tv_search_capabilities=(s[0] for s in indexer["capabilities"]["tvSearchParams"]) if len([s for s in indexer["capabilities"]["categories"] if s["name"] == "TV"]) > 0 else None))

         return indexer_list

     def _fetch_results(self, url: str, params: Dict[str, str], indexer_title: str, search_type: str) -> List[Tuple[str, str]]:
         """Fetch results from the given indexer"""
         try:
             response = get(self.session, url, params=params, timeout=self.timeout)
-            return self._parse_xml(response.data, indexer_title)
+            return self._parse_xml(response.response.text, indexer_title)
         except (HTTPError, ConnectionError, Timeout):
             logger.debug(f"Indexer failed to fetch results for {search_type.title()} with indexer {indexer_title}")
         except Exception as e:
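The single-line ProwlarrIndexer construction above packs the capability checks into one expression; an equivalent unpacked sketch for readability (helper names hypothetical, field names taken from the diff, lists here where the original uses generators):

def has_category(indexer: dict, name: str) -> bool:
    # True if the indexer advertises a category with the given name.
    return any(c["name"] == name for c in indexer["capabilities"]["categories"])

def indexer_kwargs(indexer: dict) -> dict:
    # Keyword arguments as they would be unpacked into ProwlarrIndexer(**kwargs).
    caps = indexer["capabilities"]
    return {
        "title": indexer["name"],
        "id": str(indexer["id"]),
        "link": indexer["infoLink"],
        "type": indexer["protocol"],
        "language": indexer["language"],
        "movie_search_capabilities": (
            [s[0] for s in caps["movieSearchParams"]]
            if has_category(indexer, "Movies") else None),
        "tv_search_capabilities": (
            [s[0] for s in caps["tvSearchParams"]]
            if has_category(indexer, "TV") else None),
    }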
6 changes: 3 additions & 3 deletions src/program/state_transition.py
@@ -35,12 +35,12 @@ def process_event(emitted_by: Service, existing_item: MediaItem | None = None, c

     elif existing_item is not None and existing_item.last_state == States.Indexed:
         next_service = Scraping
-        if emitted_by != Scraping and Scraping.can_we_scrape(existing_item):
+        if emitted_by != Scraping and Scraping.should_submit(existing_item):
             items_to_submit = [existing_item]
         elif existing_item.type == "show":
-            items_to_submit = [s for s in existing_item.seasons if s.last_state != States.Completed and Scraping.can_we_scrape(s)]
+            items_to_submit = [s for s in existing_item.seasons if s.last_state != States.Completed and Scraping.should_submit(s)]
         elif existing_item.type == "season":
-            items_to_submit = [e for e in existing_item.episodes if e.last_state != States.Completed and Scraping.can_we_scrape(e)]
+            items_to_submit = [e for e in existing_item.episodes if e.last_state != States.Completed and Scraping.should_submit(e)]

     elif existing_item is not None and existing_item.last_state == States.Scraped:
         next_service = Downloader
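The rename from can_we_scrape to should_submit keeps the same gate: skip children that are already complete, then let the scraper decide. A self-contained sketch with simplified stand-ins for the real models:

from dataclasses import dataclass
from enum import Enum, auto

class States(Enum):
    # Simplified stand-in for the real state enum.
    Indexed = auto()
    Completed = auto()

@dataclass
class Child:
    last_state: States

def submittable(children: list, should_submit) -> list:
    # Mirrors the list comprehensions above: drop completed children,
    # then apply the scraper's own should_submit() readiness check.
    return [c for c in children
            if c.last_state != States.Completed and should_submit(c)]

children = [Child(States.Indexed), Child(States.Completed)]
print(submittable(children, lambda c: True))  # only the Indexed child remains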
6 changes: 2 additions & 4 deletions src/program/utils/request.py
@@ -70,11 +70,10 @@ def __init__(self, message, response=None):

 class ResponseObject:
     """Response object to handle different response formats."""
-    def __init__(self, response: Response, response_type=SimpleNamespace):
+    def __init__(self, response: Response):
         self.response = response
         self.is_ok = response.ok
         self.status_code = response.status_code
-        self.response_type = response_type
         self.data = self.handle_response(response)

     def handle_response(self, response: Response) -> dict:
@@ -158,7 +157,6 @@ def _make_request(
     timeout=5,
     additional_headers=None,
     retry_if_failed=True,
-    response_type=SimpleNamespace,
     proxies=None,
     json=None,
 ) -> ResponseObject:
@@ -179,7 +177,7 @@
     finally:
         session.close()

-    return ResponseObject(response, response_type)
+    return ResponseObject(response)

 def ping(session: Session, url: str, timeout: int = 10, additional_headers=None, proxies=None, params=None) -> ResponseObject:
     """Ping method to check connectivity to a URL by making a simple GET request."""
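With response_type gone, ResponseObject always keeps the raw Response alongside a best-effort parsed payload, and callers that need the untouched body (as in the Jackett and Prowlarr changes above) read response.response.text. A condensed sketch of the resulting shape (handle_response here is simplified; the real one covers more content types):

from types import SimpleNamespace
from requests import Response

class ResponseObject:
    # Wraps a requests Response: raw object plus parsed .data.
    def __init__(self, response: Response):
        self.response = response            # raw: .text, .content, .headers
        self.is_ok = response.ok
        self.status_code = response.status_code
        self.data = self.handle_response(response)

    def handle_response(self, response: Response):
        # Simplified: JSON bodies become attribute-accessible namespaces;
        # non-JSON bodies stay reachable via self.response.text.
        try:
            return response.json(object_hook=lambda d: SimpleNamespace(**d))
        except ValueError:
            return {}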
