From 9d6c4b40f073effaec9c0950723c4cbd2bc160a7 Mon Sep 17 00:00:00 2001 From: Saifeddine ALOUI Date: Tue, 16 Jan 2024 01:44:22 +0100 Subject: [PATCH] Added authorization and multi access to multiple servers --- README.md | 127 ++++++++++++++++++++++++------------ authorized_users.txt | 3 + config.ini | 1 + ollama_proxy_server/main.py | 36 +++++++++- requirements.txt | 3 +- requirements_dev.txt | 3 +- 6 files changed, 128 insertions(+), 45 deletions(-) create mode 100644 authorized_users.txt diff --git a/README.md b/README.md index d249b75..1488263 100644 --- a/README.md +++ b/README.md @@ -1,63 +1,108 @@ -# 🌺 **Petals Server** +# Ollama Proxy Server -![Petals Logo](.png) +Ollama Proxy Server is a lightweight reverse proxy server designed for load balancing and rate limiting. It is licensed under the Apache 2.0 license and can be installed using pip. This README covers setting up, installing, and using the Ollama Proxy Server. -**One decentralized tool for text generation and community collaboration** +## Prerequisites +Make sure you have Python (>=3.8) and Apache installed on your system before proceeding. -## Table of Contents -1. [Introduction](#intro) -2. [Requirements](#requirements) -3. [Installation](#installation) -4. [Usage](#usage) -5. [License](#license) -6. [Contact](#contact) -7. [Endpoints](#endpoints) +## Installation +1. Clone or download the `ollama_proxy_server` repository from GitHub: https://github.com/ParisNeo/ollama_proxy_server +2. 
Navigate to the cloned directory in the terminal and run `pip install -e .` ---- +## Configuration -## 🌺 **Introduction** () +### Servers configuration (config.ini) +Create a file named `config.ini` in the same directory as your script, containing server configurations: +```makefile +[Server1] +url = http://localhost:8080/ +queue_size = 5 -Petals is a decentralized text generation network designed to connect users with large language models, allowing them to harness the power of the community for efficient and collaborative text generation. With Petals Server, you can share your hardware resources (CPU and GPU) to contribute to the network while also utilizing it to generate text on demand. +[Server2] +url = http://localhost:8081/ +queue_size = 3 ---- +# Add as many servers as needed, in the same format as [Server1] and [Server2]. +``` +Replace `http://localhost:8080/` with the URL and port of the first server. The `queue_size` value indicates the maximum number of requests that can be queued at a given time for this server. -## 🌺 **Requirements** () +### Authorized users (authorized_users.txt) +Create a file named `authorized_users.txt` in the same directory as your script, containing a list of user:key pairs, separated by commas and each on a new line: +```makefile +user1,key1 +user2,key2 +``` +Replace `user1`, `key1`, `user2`, and `key2` with the desired username and API key for each user. -To get started with Petals Server, ensure you have the following prerequisites: -- Git for cloning the repository -- Python 3.11 or higher -- Operating system: Linux, macOS, or Windows with WSL (Windows Subsystem for Linux) +## Usage +### Starting the server +Start the Ollama Proxy Server by running the following command in your terminal: +```bash +python ollama_proxy_server.py +``` +The server will listen on port 808x, with x being the number of available ports starting from 0 (e.g., 8080, 8081, etc.). 
The first available port will be automatically selected if no other instance is running. ---- +### Client requests +To send a request to the server, use the following command: +```bash +curl -X -H "Authorization: Bearer " http://localhost:/ [--data ] +``` +Replace `` with the HTTP method (GET or POST), `` with a valid user:key pair from your `authorized_users.txt`, `` with the port number of your running Ollama Proxy Server, and `` with the target endpoint URL (e.g., "/api/generate"). If you are making a POST request, include the `--data ` option to send data in the body. -## 🌺 **Installation** () +For example: +```bash +curl -X POST -H "Authorization: Bearer user1:key1" http://localhost:8080/api/generate --data '{"data": "Hello, World!"}' +``` # Ollama Proxy Server -Follow these steps to install Petals Server on your local machine: -1. Clone the Git repository using `git clone https://github.com/ParisNeo/petals_server.git` -2. Navigate into the cloned directory (`cd petals_server`) -3. Install dependencies with pip by running `pip install -e .` -4. Launch the server with `petals_server` +Ollama Proxy Server is a lightweight reverse proxy server designed for load balancing and rate limiting. It is licensed under the Apache 2.0 license and can be installed using pip. This README covers setting up, installing, and using the Ollama Proxy Server. ---- +## Prerequisites +Make sure you have Python (>=3.8) and Apache installed on your system before proceeding. -## 🌺 **Usage** () +## Installation +1. Clone or download the `ollama_proxy_server` repository from GitHub: https://github.com/ParisNeo/ollama_proxy_server +2. Navigate to the cloned directory in the terminal and run `pip install -e .` -Once installed, you can use Petals Server as a decentralized text generation client and contribute your hardware resources to the network. 
+## Configuration ---- +### Servers configuration (config.ini) +Create a file named `config.ini` in the same directory as your script, containing server configurations: +```makefile +[Server1] +url = http://localhost:8080/ +queue_size = 5 -## 🌺 **License** () +[Server2] +url = http://localhost:8081/ +queue_size = 3 -Petals Server is licensed under the [Apache License v2.0](https://www.apache.org/licenses/LICENSE-2.0). +# Add as many servers as needed, in the same format as [Server1] and [Server2]. +``` +Replace `http://localhost:8080/` with the URL and port of the first server. The `queue_size` value indicates the maximum number of requests that can be queued at a given time for this server. ---- +### Authorized users (authorized_users.txt) +Create a file named `authorized_users.txt` in the same directory as your script, containing a list of user:key pairs, separated by commas and each on a new line: +```makefile +user1:key1, user2:key2 +``` +Replace `user1`, `key1`, `user2`, and `key2` with the desired username and API key for each user. -## 🌺 **Contact** () +## Usage +### Starting the server +Start the Ollama Proxy Server by running the following command in your terminal: +```bash +python ollama_proxy_server.py +``` +The server will listen on port 808x, with x being the number of available ports starting from 0 (e.g., 8080, 8081, etc.). The first available port will be automatically selected if no other instance is running. -For any queries or feedback, reach out to ParisNeo on Twitter (@SpaceNerduino), Discord (https://discord.gg/BDxacQmv), or subscribe to the r/lollms Subreddit for community updates and discussions. 
+### Client requests +To send a request to the server, use the following command: +```bash +curl -X -H "Authorization: Bearer " http://localhost:/ [--data ] +``` +Replace `` with the HTTP method (GET or POST), `` with a valid user:key pair from your `authorized_users.txt`, `` with the port number of your running Ollama Proxy Server, and `` with the target endpoint URL (e.g., "/api/generate"). If you are making a POST request, include the `--data ` option to send data in the body. ---- - -## 🌺 **Endpoints** () - -To explore all available endpoints, navigate to `http://localhost:8000/docs`. +For example: +```bash +curl -X POST -H "Authorization: Bearer user1:key1" http://localhost:8080/api/generate --data '{"data": "Hello, World!"}' +``` \ No newline at end of file diff --git a/authorized_users.txt b/authorized_users.txt new file mode 100644 index 0000000..f3baa89 --- /dev/null +++ b/authorized_users.txt @@ -0,0 +1,3 @@ +user1,0XAXAXAQX5A1F +user2,0XAXAXXQX5A1F +user3,0XAXAXAXX5A1F diff --git a/config.ini b/config.ini index eeea4d6..95162c8 100644 --- a/config.ini +++ b/config.ini @@ -5,3 +5,4 @@ url = http://localhost:11434 url = http://localhost:3002 # Add more servers as you need. 
+ diff --git a/ollama_proxy_server/main.py b/ollama_proxy_server/main.py index 41e9f6f..8f91f79 100644 --- a/ollama_proxy_server/main.py +++ b/ollama_proxy_server/main.py @@ -6,21 +6,34 @@ from queue import Queue import requests import threading import argparse - +import base64 +from ascii_colors import ASCIIColors def get_config(filename): config = configparser.ConfigParser() config.read(filename) return [(name, {'url': config[name]['url'], 'queue': Queue()}) for name in config.sections()] +# Read the authorized users and their keys from a file +def get_authorized_users(filename): + with open(filename, 'r') as f: + lines = f.readlines() + authorized_users = {} + for line in lines: + user, key = line.strip().split(',') + authorized_users[user] = key + return authorized_users + def main(): parser = argparse.ArgumentParser() - parser.add_argument('--config',default="config.ini", help='Path to the config file') # , required=True + parser.add_argument('--config',default="config.ini", help='Path to the config file') + parser.add_argument('--users_list', default="authorized_users.txt", help='Path to the authorized users list') parser.add_argument('--port', type=int, default=8000, help='Port number for the server') args = parser.parse_args() servers = get_config(args.config) + authorized_users = get_authorized_users(args.users_list) class RequestHandler(BaseHTTPRequestHandler): @@ -31,16 +44,35 @@ def main(): self.wfile.write(response.content) def do_GET(self): + self.log_request() self.proxy() def do_POST(self): + self.log_request() self.proxy() + def _validate_user_and_key(self): + # Extract the bearer token from the headers + auth_header = self.headers.get('Authorization') + if not auth_header or not auth_header.startswith('Bearer '): + return False + token = auth_header.split(' ')[1] + user, key = token.split(':') + + # Check if the user and key are in the list of authorized users + return authorized_users.get(user) == key + def proxy(self): + if not 
self._validate_user_and_key(): + ASCIIColors.red(f'User is not authorized') + self.send_response(403) + self.end_headers() + return url = urlparse(self.path) path = url.path get_params = parse_qs(url.query) or {} + if self.command == "POST": content_length = int(self.headers['Content-Length']) post_data = self.rfile.read(content_length) diff --git a/requirements.txt b/requirements.txt index 3dd9c9c..f254b8e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,4 +2,5 @@ configparser queues requests urllib3 -requests \ No newline at end of file +requests +ascii_colors \ No newline at end of file diff --git a/requirements_dev.txt b/requirements_dev.txt index 908455f..6be106c 100644 --- a/requirements_dev.txt +++ b/requirements_dev.txt @@ -3,4 +3,5 @@ queues==6.4.0 requests==2.27.1 socketserver==3.5.0 urllib3==1.26.8 -requests \ No newline at end of file +requests +ascii_colors \ No newline at end of file