This guide demonstrates how to integrate Trust3 client for access control in your Python applications when Trust3 server is deployed as a Snowflake native app, providing data governance and security for AI/LLM interactions.
Go to your installed Snowflake application in your account
Launch the application - this will open the Streamlit app for managing Trust3 application server
Click the "Refresh" button to obtain the Trust3 server base URL
SNOWFLAKE_PAT_TOKEN:
This is a programmatic access token used to authenticate with Snowflake, as every native app endpoint uses Snowflake authentication as the first layer of authentication.
import uuid

from trust3_client import client as trust3_shield_client
from trust3_client.model import ConversationType
import trust3_client.exception

# Initialize Trust3 client
trust3_shield_client.setup(frameworks=[])

# Setup your application with Trust3.
# TRUST3_SERVER_BASE_URL, TRUST3_AI_APP_API_KEY and SNOWFLAKE_PAT_TOKEN are
# defined by you (see the configuration steps above).
app = trust3_shield_client.setup_app(
    endpoint=TRUST3_SERVER_BASE_URL,
    application_config_api_key=TRUST3_AI_APP_API_KEY,
    snowflake_pat_token=SNOWFLAKE_PAT_TOKEN,
)
# Replace with actual username or service account
user = "your-username"

# All Trust3 validation calls must run inside a shield context,
# which binds the application and the acting user.
with trust3_shield_client.create_shield_context(application=app, username=user):
    # Your AI logic/prompt/response validation goes here
    pass
try:
    # Generate unique thread ID for conversation tracking
    thread_id = str(uuid.uuid4())

    # Original user prompt
    user_prompt = "User's input text here"

    # Validate prompt with Trust3
    validated_prompt = trust3_shield_client.check_access(
        text=user_prompt,
        conversation_type=ConversationType.PROMPT,
        thread_id=thread_id,
    )

    # Extract the validated text
    safe_prompt = validated_prompt[0].response_text
except trust3_client.exception.AccessControlException as e:
    print(f"Prompt blocked by access control: {e}")
    # Handle blocked prompt appropriately
try:
    # Get response from LLM
    llm_response = "LLM generated response"

    # Validate response with Trust3
    validated_response = trust3_shield_client.check_access(
        text=llm_response,
        conversation_type=ConversationType.REPLY,
        thread_id=thread_id,  # Same thread_id used for prompt
    )

    # Extract the validated response
    safe_response = validated_response[0].response_text
except trust3_client.exception.AccessControlException as e:
    print(f"Response blocked by access control: {e}")
    # Handle blocked response appropriately
import ast

try:
    # Generate unique thread ID for conversation tracking
    thread_id = str(uuid.uuid4())

    with trust3_shield_client.create_shield_context(application=app, username=user):
        # Get vector database filter expression
        filter_response = trust3_shield_client.get_vector_db_filter_expression(
            thread_id=thread_id
        )

        # By default, filter_response is a string. Convert it to a dictionary
        # if required by your vector database. ast.literal_eval is safe here:
        # it only parses Python literals, never executes code.
        # (named db_filter rather than `filter` to avoid shadowing the builtin)
        db_filter = ast.literal_eval(filter_response)

        # Pass on this filter to your vector database API call
        # Example: results = vector_db.search(query, filter=db_filter)
        print(f"Vector DB Filter: {db_filter}")

        # To audit vector database operations, pass the filter information
        # back to Trust3. Get the current vector database information for auditing.
        filter_response_dict = trust3_shield_client.get_current("vectorDBInfo")

        # Create a new shield context with vector database info for
        # subsequent operations
        with trust3_shield_client.create_shield_context(
            application=app,
            username=user,
            vectorDBInfo=filter_response_dict,
        ):
            # Perform your prompt/response validation with vector DB audit trail.
            # This ensures Trust3 can track which data was accessed from the
            # vector database.
            pass
except trust3_client.exception.AccessControlException as e:
    print(f"Filter access denied: {e}")
    # Handle filter access denial appropriately
from openai import OpenAI
import uuid

from trust3_client import client as trust3_shield_client
from trust3_client.model import ConversationType
import trust3_client.exception

# Initialize Trust3 client
trust3_shield_client.setup(frameworks=[])

TRUST3_SERVER_BASE_URL = "<your-trust3-server-base-url>"
SNOWFLAKE_PAT_TOKEN = "<your-snowflake-pat-token>"
TRUST3_AI_APP_API_KEY = "<your-trust3-ai-app-api-key>"

# Setup your application with Trust3
app = trust3_shield_client.setup_app(
    endpoint=TRUST3_SERVER_BASE_URL,
    application_config_api_key=TRUST3_AI_APP_API_KEY,
    snowflake_pat_token=SNOWFLAKE_PAT_TOKEN,
)


def secure_ai_chat(user_prompt, username="testuser"):
    """
    Secure AI chat function with Trust3 integration.

    Validates the user's prompt and the LLM's reply through Trust3 before
    returning the response.

    :param user_prompt: raw text entered by the end user
    :param username: Trust3 user/service account the request is evaluated as
    :return: the validated LLM response, or a policy/error message string
    """
    try:
        # Generate conversation thread ID so prompt and reply are
        # correlated in Trust3 audit records
        thread_id = str(uuid.uuid4())

        # Create shield context for the user
        with trust3_shield_client.create_shield_context(
            application=app, username=username
        ):
            print(f"Original prompt: {user_prompt}")

            # 1. Validate user prompt
            validated_prompt = trust3_shield_client.check_access(
                text=user_prompt,
                conversation_type=ConversationType.PROMPT,
                thread_id=thread_id,
            )
            safe_prompt = validated_prompt[0].response_text
            print(f"Validated prompt: {safe_prompt}")

            # 2. Send to LLM (example with OpenAI)
            openai_client = OpenAI()  # Ensure OPENAI_API_KEY is set
            response = openai_client.chat.completions.create(
                model="gpt-4",
                messages=[{"role": "user", "content": safe_prompt}],
                temperature=0,
            )
            llm_response = response.choices[0].message.content
            print(f"LLM response: {llm_response}")

            # 3. Validate LLM response (same thread_id as the prompt)
            validated_response = trust3_shield_client.check_access(
                text=llm_response,
                conversation_type=ConversationType.REPLY,
                thread_id=thread_id,
            )
            safe_response = validated_response[0].response_text
            print(f"Final response: {safe_response}")

            return safe_response
    except trust3_client.exception.AccessControlException as e:
        print(f"Access denied: {e}")
        return "I'm sorry, I cannot process this request due to security policies."
    except Exception as e:
        print(f"Error: {e}")
        return "An error occurred while processing your request."


# Usage
if __name__ == "__main__":
    user_question = "What is my email address if it's abc@gmail.com?"
    response = secure_ai_chat(user_question)
    print(f"Bot: {response}")
try:# Trust3 operationspassexcepttrust3_client.exception.AccessControlExceptionase:# Log the violation for audit purposeslogger.warning(f"Access control violation: {e}")# Return appropriate user-friendly messagereturn"Request cannot be processed due to security policies."