Skip to content

Sentry integration #58

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 4 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions cortext/reward.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from __future__ import annotations
import sentry_sdk
from transformers import logging as hf_logging
hf_logging.set_verbosity_error()

Expand Down Expand Up @@ -58,6 +59,7 @@ def calculate_text_similarity(text1: str, text2: str):
bt.logging.debug(f"Similarity: {similarity}")
return similarity
except Exception as e:
sentry_sdk.capture_exception()
bt.logging.error(f"Error in calculate_text_similarity: {traceback.format_exc()}")
raise

Expand All @@ -82,6 +84,7 @@ async def api_score(api_answer: str, response: str, weight: float, temperature:

return score
except Exception as e:
sentry_sdk.capture_exception()
bt.logging.error(f"Exception in api_score: {traceback.format_exc()}")


Expand Down Expand Up @@ -119,6 +122,7 @@ async def is_image_url(url: str) -> bool:
async with session.head(url) as response:
return response.status == 200 and 'image' in response.headers.get('Content-Type', '')
except Exception as e:
sentry_sdk.capture_exception()
bt.logging.info(f"Error checking URL: {e}")
return False

Expand All @@ -134,6 +138,7 @@ async def load_image_from_url(url: str):
image.verify() # Verify that this is indeed an image
return image
except Exception as e:
sentry_sdk.capture_exception()
bt.logging.info(f"Failed to load image: {e}")


Expand Down Expand Up @@ -191,6 +196,7 @@ async def dalle_score(uid, url, desired_size, description, weight, similarity_th
bt.logging.debug(f"UID {uid} failed similary test with score of: {round(similarity, 5)}. Score = {0}")
return 0
except Exception as e:
sentry_sdk.capture_exception()
bt.logging.info(f"Error in image scoring for UID {uid}: {e}")
return 0

Expand Down
22 changes: 22 additions & 0 deletions cortext/sentry.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
from bittensor import config as Config
import bittensor as bt
import sentry_sdk

from cortext import __version__

def init_sentry(config: Config, tags: "dict | None" = None):
    """Initialise the Sentry SDK from a bittensor config.

    Sentry stays opt-in: when ``config.sentry_dsn`` is unset this is a
    no-op.  On a successful init, each key/value pair in *tags* is
    attached to the global Sentry scope via ``sentry_sdk.set_tag``.

    Args:
        config: bittensor config; reads ``sentry_dsn`` and ``netuid``.
        tags: optional extra tags to set after initialisation.
    """
    # None sentinel instead of a mutable `{}` default — a shared dict
    # default would be reused (and mutable) across all calls.
    if tags is None:
        tags = {}

    if config.sentry_dsn is None:
        bt.logging.info("Sentry is DISABLED")
        return

    bt.logging.info(f"Sentry is ENABLED. Using dsn={config.sentry_dsn}")
    sentry_sdk.init(
        dsn=config.sentry_dsn,
        release=__version__,
        environment=f"subnet #{config.netuid}",
        # NOTE(review): 100% trace/profile sampling is expensive in
        # production — presumably intentional for a small subnet; confirm.
        traces_sample_rate=1.0,
        profiles_sample_rate=1.0,
    )

    for key, value in tags.items():
        sentry_sdk.set_tag(key, value)
10 changes: 10 additions & 0 deletions cortext/utils.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
from __future__ import annotations
import sentry_sdk

import io
import ast
Expand Down Expand Up @@ -69,6 +70,7 @@ def load_state_from_file(filename: str):
load_success = True # Set flag to true as the operation was successful
return state
except Exception as e: # Catch specific exceptions for better error handling
sentry_sdk.capture_exception()
bt.logging.error(f"error loading state, deleting and resetting it. Error: {e}")
os.remove(filename) # Delete if error

Expand Down Expand Up @@ -187,9 +189,11 @@ async def get_list(list_type, num_questions_needed, theme=None):
break
bt.logging.error(f"no list found in {new_answer}")
except Exception as e:
sentry_sdk.capture_exception()
bt.logging.error(f"Exception on retry {retry + 1} for prompt '{selected_prompts[i]}': "
f"{e}\n{traceback.format_exc()}")
except Exception as e:
sentry_sdk.capture_exception()
bt.logging.error(f"Exception in processing initial response for prompt '{selected_prompts[i]}': "
f"{e}\n{traceback.format_exc()}")

Expand Down Expand Up @@ -362,6 +366,7 @@ def extract_python_list(text: str):
return evaluated

except Exception as e:
sentry_sdk.capture_exception()
bt.logging.error(f"found double quotes in list, trying again")

return None
Expand All @@ -384,6 +389,7 @@ async def call_openai(messages, temperature, model, seed=1234, max_tokens=2048,
return response

except Exception as e:
sentry_sdk.capture_exception()
bt.logging.error(f"Error when calling OpenAI: {traceback.format_exc()}")
await asyncio.sleep(0.5)

Expand All @@ -410,6 +416,7 @@ async def call_gemini(messages, temperature, model, max_tokens, top_p, top_k):
print(f"validator response is {response.text}")
return response.text
except:
sentry_sdk.capture_exception()
print(f"error in call_gemini {traceback.format_exc()}")


Expand Down Expand Up @@ -453,6 +460,7 @@ async def call_anthropic(prompt, temperature, model, max_tokens=2048, top_p=1, t

return completion.completion
except Exception as e:
sentry_sdk.capture_exception()
bt.logging.error(f"Error when calling Anthropic: {traceback.format_exc()}")
await asyncio.sleep(0.5)

Expand Down Expand Up @@ -480,6 +488,7 @@ async def call_claude(messages, temperature, model, max_tokens, top_p, top_k):
bt.logging.debug(f"validator response is {message.content[0].text}")
return message.content[0].text
except:
sentry_sdk.capture_exception()
bt.logging.error(f"error in call_claude {traceback.format_exc()}")

async def call_stability(prompt, seed, steps, cfg_scale, width, height, samples, sampler):
Expand Down Expand Up @@ -539,4 +548,5 @@ def send_discord_alert(message, webhook_url):
else:
print(f"Failed to send Discord alert. Status code: {response.status_code}")
except Exception as e:
sentry_sdk.capture_exception()
print(f"Failed to send Discord alert: {e}", exc_info=True)
13 changes: 12 additions & 1 deletion miner/claude_miner.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
import base # noqa

from cortext.sentry import init_sentry
import sentry_sdk
import argparse
import asyncio
import copy
Expand Down Expand Up @@ -79,6 +81,7 @@ def __init__(self, config=None, axon=None, wallet=None, subtensor=None):
self.config = self.config()
self.config.merge(base_config)
check_config(StreamMiner, self.config)
init_sentry(self.config, {"neuron-type": "claude-miner"})
bt.logging.info(self.config) # TODO: duplicate print?
self.prompt_cache: dict[str, Tuple[str, int]] = {}
self.request_timestamps = {}
Expand Down Expand Up @@ -201,6 +204,7 @@ def base_blacklist(self, synapse, blacklist_amt = 20000) -> Tuple[bool, str]:
return False, f"accepting {synapse_type} request from {hotkey}"

except Exception:
sentry_sdk.capture_exception()
bt.logging.error(f"errror in blacklist {traceback.format_exc()}")


Expand Down Expand Up @@ -320,11 +324,13 @@ def run(self):
step += 1

except KeyboardInterrupt:
sentry_sdk.capture_exception()
self.axon.stop()
bt.logging.success("Miner killed by keyboard interrupt.")
sys.exit()

except Exception:
sentry_sdk.capture_exception()
bt.logging.error(traceback.format_exc())

def run_in_background_thread(self) -> None:
Expand Down Expand Up @@ -358,7 +364,7 @@ def config(self) -> bt.config:
return bt.config(parser)

def add_args(cls, parser: argparse.ArgumentParser):
pass
parser.add_argument("--sentry-dsn",type=str,default=None,help="The url that sentry will use to send exception information to")

async def embeddings(self, synapse: Embeddings) -> Embeddings:
bt.logging.info(f"entered embeddings processing for embeddings of len {len(synapse.texts)}")
Expand Down Expand Up @@ -395,6 +401,7 @@ async def get_embeddings_in_batch(texts, model, batch_size=10):
bt.logging.info(f"synapse response is {synapse.embeddings[0][:10]}")
return synapse
except Exception:
sentry_sdk.capture_exception()
bt.logging.error(f"Exception in embeddings function: {traceback.format_exc()}")


Expand Down Expand Up @@ -463,6 +470,7 @@ async def images(self, synapse: ImageResponse) -> ImageResponse:
return synapse

except Exception as exc:
sentry_sdk.capture_exception()
bt.logging.error(f"error in images: {exc}\n{traceback.format_exc()}")

def prompt(self, synapse: StreamPrompting) -> StreamPrompting:
Expand Down Expand Up @@ -572,6 +580,7 @@ async def _prompt(synapse, send: Send):
bt.logging.error(f"Unknown provider: {provider}")

except Exception as e:
sentry_sdk.capture_exception()
bt.logging.error(f"error in _prompt {e}\n{traceback.format_exc()}")

token_streamer = partial(_prompt, synapse)
Expand Down Expand Up @@ -618,12 +627,14 @@ def get_valid_hotkeys(config):
if hotkey not in valid_hotkeys:
valid_hotkeys.append(hotkey)
except Exception:
sentry_sdk.capture_exception()
bt.logging.debug(f"exception in get_valid_hotkeys: {traceback.format_exc()}")

bt.logging.info(f"total valid hotkeys list = {valid_hotkeys}")
time.sleep(180)

except json.JSONDecodeError as e:
sentry_sdk.capture_exception()
bt.logging.debug(f"JSON decoding error: {e} {run.id}")


Expand Down
11 changes: 11 additions & 0 deletions miner/miner.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import argparse
import sentry_sdk
import asyncio
import base64
import copy
Expand All @@ -15,6 +16,7 @@
from typing import Tuple

import bittensor as bt
from cortext.sentry import init_sentry
import google.generativeai as genai
import wandb
from PIL import Image
Expand Down Expand Up @@ -101,6 +103,7 @@ def __init__(self, config=None, axon=None, wallet=None, subtensor=None):
self.config = self.config()
self.config.merge(base_config)
check_config(StreamMiner, self.config)
init_sentry(self.config, {"neuron-type": "miner"})
bt.logging.info(self.config)
self.prompt_cache: dict[str, Tuple[str, int]] = {}
self.request_timestamps = {}
Expand Down Expand Up @@ -226,6 +229,7 @@ def base_blacklist(self, synapse, blacklist_amt = 20000) -> Tuple[bool, str]:
return False, f"accepting {synapse_type} request from {hotkey}"

except Exception:
sentry_sdk.capture_exception()
bt.logging.error(f"errror in blacklist {traceback.format_exc()}")


Expand Down Expand Up @@ -314,11 +318,13 @@ def run(self):
step += 1

except KeyboardInterrupt:
sentry_sdk.capture_exception()
self.axon.stop()
bt.logging.success("Miner killed by keyboard interrupt.")
sys.exit()

except Exception:
sentry_sdk.capture_exception()
bt.logging.error(traceback.format_exc())

def run_in_background_thread(self) -> None:
Expand Down Expand Up @@ -488,6 +494,7 @@ async def _prompt(synapse, send: Send):
bt.logging.error(f"Unknown provider: {provider}")

except Exception as e:
sentry_sdk.capture_exception()
bt.logging.error(f"error in _prompt {e}\n{traceback.format_exc()}")

token_streamer = partial(_prompt, synapse)
Expand Down Expand Up @@ -558,6 +565,7 @@ async def images(self, synapse: ImageResponse) -> ImageResponse:
return synapse

except Exception as exc:
sentry_sdk.capture_exception()
bt.logging.error(f"error in images: {exc}\n{traceback.format_exc()}")

async def embeddings(self, synapse: Embeddings) -> Embeddings:
Expand Down Expand Up @@ -595,6 +603,7 @@ async def get_embeddings_in_batch(texts, model, batch_size=10):
bt.logging.info(f"synapse response is {synapse.embeddings[0][:10]}")
return synapse
except Exception:
sentry_sdk.capture_exception()
bt.logging.error(f"Exception in embeddings function: {traceback.format_exc()}")

async def is_alive(self, synapse: IsAlive) -> IsAlive:
Expand Down Expand Up @@ -644,12 +653,14 @@ def get_valid_hotkeys(config):
if hotkey not in valid_hotkeys:
valid_hotkeys.append(hotkey)
except Exception:
sentry_sdk.capture_exception()
bt.logging.debug(f"exception in get_valid_hotkeys: {traceback.format_exc()}")

bt.logging.info(f"total valid hotkeys list = {valid_hotkeys}")
time.sleep(180)

except json.JSONDecodeError as e:
sentry_sdk.capture_exception()
bt.logging.debug(f"JSON decoding error: {e} {run.id}")


Expand Down
1 change: 1 addition & 0 deletions requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -15,3 +15,4 @@ boto3
anthropic_bedrock
pyOpenSSL
google-generativeai
sentry_sdk==1.44
2 changes: 2 additions & 0 deletions start_validator.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import argparse
import sentry_sdk
import time
import subprocess
import cortext
Expand Down Expand Up @@ -46,4 +47,5 @@ def update_and_restart(pm2_name, wallet_name, wallet_hotkey, address, autoupdate
try:
update_and_restart(args.pm2_name, args.wallet_name, args.wallet_hotkey, args.address, args.autoupdate)
except Exception as e:
sentry_sdk.capture_exception()
parser.error(f"An error occurred: {e}")
3 changes: 3 additions & 0 deletions test_scripts/get_valid_valis.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import os
import sentry_sdk
import time
import wandb
import json
Expand Down Expand Up @@ -64,12 +65,14 @@ def get_valid_hotkeys(config):
if hotkey not in valid_hotkeys:
valid_hotkeys.append(hotkey)
except Exception as e:
sentry_sdk.capture_exception()
print(f"exception in get_valid_hotkeys: {traceback.format_exc()}")

print(f"total valid hotkeys list = {valid_hotkeys}")
time.sleep(180)

except json.JSONDecodeError as e:
sentry_sdk.capture_exception()
print(f"JSON decoding error: {e} {run.id}")

config = get_config()
Expand Down
3 changes: 3 additions & 0 deletions test_scripts/t2e/test_embeddings.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import os
import sentry_sdk
import random
import asyncio
import traceback
Expand Down Expand Up @@ -34,6 +35,7 @@ async def get_embeddings_in_batch(texts, model, batch_size=10):
batch_embeddings = [item.embedding for item in response.data]
all_embeddings.extend(batch_embeddings)
except Exception as e:
sentry_sdk.capture_exception()
bt.logging.error(f"Error in processing batch: {e}")

return all_embeddings
Expand All @@ -43,6 +45,7 @@ async def get_embeddings_in_batch(texts, model, batch_size=10):
embeddings = [np.array(embed) for embed in batched_embeddings]
return embeddings
except Exception as e:
sentry_sdk.capture_exception()
bt.logging.error(f"Exception in embeddings function: {traceback.format_exc()}")


Expand Down
3 changes: 3 additions & 0 deletions test_scripts/t2t/test_gemini.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import os
import sentry_sdk
import google.generativeai as genai
import traceback
import asyncio
Expand Down Expand Up @@ -57,6 +58,7 @@ async def call_gemini(messages, temperature, model, max_tokens, top_p, top_k):
print(stream)
return stream.text
except:
sentry_sdk.capture_exception()
print(f"error in call_gemini {traceback.format_exc()}")

# Non streaming
Expand All @@ -81,6 +83,7 @@ async def call_gemini(messages, temperature, model, max_tokens, top_p, top_k):
print(f"validator response is {response.text}")
return response.text
except:
sentry_sdk.capture_exception()
print(f"error in call_gemini {traceback.format_exc()}")

async def main():
Expand Down
Loading