Step-by-step guide for integrating and managing multiple ML models in your web app to boost performance and scalability.

Book a call with an Expert
Starting a new venture? Need to upgrade your web app? RapidDev builds applications with your growth in mind.
# Example in Python (using Flask as a web framework)
import pickle  # for loading pickle-based models
import tensorflow as tf  # if using TensorFlow models
class ModelManager:
    """Loads and stores multiple ML models, keyed by a string name."""

    def __init__(self):
        # Registry of loaded models: name -> model instance
        self.models = {}

    def load_pickle_model(self, model_name, file_path):
        """Load a pickled model from ``file_path`` and register it under ``model_name``.

        NOTE(review): ``pickle.load`` executes arbitrary code from the file —
        only load model files from trusted sources.
        """
        with open(file_path, 'rb') as f:
            model = pickle.load(f)
        self.models[model_name] = model

    def load_tf_model(self, model_name, directory_path):
        """Load a TensorFlow/Keras saved model directory and register it."""
        model = tf.keras.models.load_model(directory_path)
        self.models[model_name] = model

    def get_model(self, model_name):
        """Return the model registered under ``model_name``, or None if absent."""
        return self.models.get(model_name)
# Initialize and load models (this would occur during your app startup)
model_manager = ModelManager()
model_manager.load_pickle_model('classifier', 'models/classifier.pkl')
model_manager.load_tf_model('regressor', 'models/regressor')
# Using Flask to create API endpoints
from flask import Flask, request, jsonify

app = Flask(__name__)
@app.route('/predict/<model_name>', methods=['POST'])
def predict(model_name):
    """Run a prediction with the model named in the URL path.

    Expects a JSON body of the form ``{"data": ...}``.  Returns the
    prediction as JSON, or an error payload with status 404 (unknown
    model), 400 (missing input), or 500 (model raised).
    """
    # Retrieve the requested model; 404 if it was never loaded
    model = model_manager.get_model(model_name)
    if model is None:
        return jsonify({'error': 'Model not found'}), 404

    # The input is passed as JSON with the key "data"
    input_data = request.json.get('data')
    if input_data is None:
        return jsonify({'error': 'No input data provided'}), 400

    # Prediction logic differs based on the model type
    try:
        if model_name == 'classifier':
            # Pickle-based model: assume a scikit-learn style predict()
            prediction = model.predict([input_data])
        elif model_name == 'regressor':
            # TensorFlow model: output is an array, convert for JSON serialization
            prediction = model.predict([input_data]).tolist()
        else:
            prediction = 'Unsupported model type'
    except Exception as e:
        # Surface model errors to the client as a 500
        return jsonify({'error': str(e)}), 500
    return jsonify({'model': model_name, 'prediction': prediction})
if __name__ == '__main__':
    # debug=True is for local development only — disable in production
    app.run(debug=True)
# Example using asynchronous endpoints with FastAPI
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel

app = FastAPI()
class PredictionRequest(BaseModel):
    """Request body schema for the async prediction endpoint."""
    # Expected list of model inputs
    data: list
@app.post('/predict_async/{model_name}')
async def predict_async(model_name: str, request_data: PredictionRequest):
    """Async prediction endpoint: look up the model by name and run it.

    Raises HTTPException 404 if the model is unknown, 500 if prediction fails.
    """
    model = model_manager.get_model(model_name)
    if model is None:
        raise HTTPException(status_code=404, detail="Model not found")
    try:
        # Run prediction asynchronously if supported, else run in a thread pool
        prediction = await run_model_prediction(model, request_data.data)
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
    return {'model': model_name, 'prediction': prediction}
# Dummy async function to simulate prediction
async def run_model_prediction(model, data):
    """Wrap ``model.predict`` so the endpoint can await it.

    NOTE(review): this still runs predict() synchronously on the event
    loop; offload to a thread pool (e.g. ``loop.run_in_executor``) for
    real workloads.  Assumes model.predict returns an array-like object
    with a ``.tolist()`` method.
    """
    return model.predict([data]).tolist()
// Example JavaScript fetch to a prediction endpoint
async function requestPrediction(modelName, inputData) {
  // POST the input to /predict/<modelName> and log the prediction.
  try {
    const res = await fetch(`/predict/${modelName}`, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({ data: inputData })
    });
    if (!res.ok) {
      // Server responded with an error payload ({error: ...})
      const payload = await res.json();
      console.error('Error:', payload.error);
      return;
    }
    const body = await res.json();
    console.log('Prediction:', body.prediction);
    // Update the UI with the prediction result
  } catch (err) {
    // Network failure or non-JSON response
    console.error('Request failed:', err);
  }
}
# Example extension in the ModelManager for preprocessing
class ModelManager:
    """Model registry with optional per-model pre/post-processing hooks."""

    def __init__(self):
        self.models = {}
        self.preprocessors = {}
        self.postprocessors = {}

    def register_model(self, model_name, model, preprocess_fn=None, postprocess_fn=None):
        """Register ``model`` under ``model_name`` with optional hook callables."""
        self.models[model_name] = model
        self.preprocessors[model_name] = preprocess_fn
        self.postprocessors[model_name] = postprocess_fn

    def predict(self, model_name, raw_input):
        """Run the full pipeline: preprocess -> model.predict -> postprocess.

        Raises:
            KeyError: if ``model_name`` was never registered.  (KeyError is
            a subclass of Exception, so callers catching Exception still work.)
        """
        model = self.models.get(model_name)
        if model is None:
            raise KeyError("Model not registered")
        # Preprocess input if a function is provided
        pre_fn = self.preprocessors.get(model_name)
        processed_input = pre_fn(raw_input) if pre_fn else raw_input
        # Run the prediction (input wrapped in a one-element batch)
        prediction = model.predict([processed_input])
        # Postprocess output if a function is provided
        post_fn = self.postprocessors.get(model_name)
        return post_fn(prediction) if post_fn else prediction
# Example usage:
def classifier_preprocess(data):
    """Classifier-specific input transformation (identity placeholder)."""
    # Insert any classifier-specific transformation here
    return data
def classifier_postprocess(prediction):
    """Classifier-specific output transformation (identity placeholder).

    For example, convert predicted probabilities to class labels.
    """
    return prediction
# Register the classifier with its functions.
# NOTE(review): loaded_classifier_model is not defined in this snippet — it
# must be created earlier (e.g. via pickle.load) for this call to work.
model_manager.register_model('classifier', loaded_classifier_model, classifier_preprocess, classifier_postprocess)
From startups to enterprises and everything in between, see for yourself our incredible impact.
Need a dedicated strategic tech and growth partner? Discover what RapidDev can do for your business! Book a call with our team to schedule a free, no-obligation consultation. We’ll discuss your project and provide a custom quote at no cost.