API POST request using the Python requests module

import json

import requests

base_url = "post_api_url"
headers = {"Content-Type": "application/json", "x-user-id": "xxxx"}

start_date = "2021-10-20T10:45:40.034650"
end_date = "2021-10-27T10:45:40.034849"

parameters = {
    "page": 1,
    "skip": 0,
    "order_by": "desc",
    "param1": [],
    "param2": [],
    "start_date": start_date,
    "end_date": end_date,
}
resources = requests.post(base_url, headers=headers, data=json.dumps(parameters)).json()
print("Total records from stash:", resources["total_records"])

all_id = []
for j in range(16):
    parameters = {
        "case_id": [],
        "entity": [],
        "risk_categories": [],
        "source_cateogories": [],
        "domains": [],
        "rag_status": [],
        "start_date": start_date,
        "end_date": end_date,
        "skip": j * 10,
        "order_by": "desc",
    }
    resources = requests.post(base_url, headers=headers, data=json.dumps(parameters)).json()
    # collect only ids we have not seen on earlier pages
    for i in resources["data"]:
        if i["id"] not in all_id:
            all_id.append(i["id"])
    print(f"page-{j+1} current total records: {resources['current_total_records']}, appended unique records: {len(all_id)}")
print("Final appended records:", len(all_id))
# print(all_id)
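The loop above hard-codes 16 pages with a page size of 10. As a small hedged variant (assuming the page size stays 10 and total_records is read right after the first request), the number of pages can be derived instead of fixed:

import math

PAGE_SIZE = 10  # assumed page size, matching skip = j * 10 above
total_records = resources["total_records"]  # capture this right after the first request
total_pages = math.ceil(total_records / PAGE_SIZE)
for j in range(total_pages):
    ...  # same request / unique-id collection logic as in the loop above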

Redis Cache with Python

"Redis is an open source, advanced key-value store and an apt solution for building highperformance, scalable web applications"

Example:

# redis==3.5.3
# redis-py-cluster==2.1.3

import json

from rediscluster import RedisCluster

startup_nodes = [
    {
        "host": "python-ds-production-redis-cache-xxxxxxxxx.euw2.cache.amazonaws.com",
        "port": "xxxx",
    }
]
redis_cluster = RedisCluster(
    startup_nodes=startup_nodes, decode_responses=True, skip_full_coverage_check=True
)

KEY_PREFIX = "python-ds"


def convert_to_json(data):
    # simple JSON serialisation helper used by the cache functions below
    return json.dumps(data)


def convert_from_json(data):
    return json.loads(data)


def set_cache_key(key, data, expire_time):
    key = f"{KEY_PREFIX}::::{key}"
    redis_cluster.set(key, convert_to_json(data), ex=expire_time)
    return True


def get_cache_key(key):
    key = f"{KEY_PREFIX}::::{key}"
    data = redis_cluster.get(key)
    # return None on a cache miss instead of trying to decode it
    return convert_from_json(data) if data is not None else None


async def get_data(self):
    # method of a service class; db.connection is an async MongoDB (e.g. motor) database
    cache_key = "master::::MyCategory"
    cache_data = get_cache_key(cache_key)
    if cache_data is not None:
        return cache_data
    collection = db.connection['collection_name']
    data = collection.find({})
    all_data = []
    async for rk in data:
        all_data.append(rk)
    response = {"status": True, "message": "Data", "data": all_data}
    # cache the response for 30 days
    set_cache_key(cache_key, response, 30 * 24 * 60 * 60)
    return response

Python Logic for Limit/Page-Number Pagination

# Custom pagination with prev, next and total pages
def get_paging_info(tot_records, current_page, limit=20):
    total_pages = tot_records // limit  # calc pages
    if tot_records % limit > 0:
        total_pages = total_pages + 1
    skip = (current_page * limit) - limit
    if skip < 0:
        skip = 0
    data = {}
    data['total_rows'] = tot_records
    data['total_pages'] = total_pages
    data['skip_rows'] = skip
    data['current_page'] = current_page
    if total_pages == 1:
        data['prev'] = None
        data['next'] = None
    elif current_page == total_pages:
        data['prev'] = current_page - 1
        data['next'] = None
    elif total_pages > 1 and current_page == 1:
        data['prev'] = None
        data['next'] = current_page + 1
    elif current_page > total_pages:
        data['prev'] = current_page - 1
        data['next'] = None
    else:
        data['prev'] = current_page - 1
        data['next'] = current_page + 1
    return data


current_page = docs.get("current_page", 1)
order_by = docs.get("order_by", "updated_at")
order_type = docs.get("order_type", -1)
limit = docs.get("limit", 10)
skip = (current_page * limit) - limit
total_records = await collection.count_documents(query_doc)
cases = collection.find(query_doc).sort(order_by, order_type).skip(skip).limit(limit)
all_data = [case async for case in cases]
pagination = get_paging_info(total_records, current_page, limit)
return {"status": True, "message": ".....", "data": all_data, "pagination": pagination}
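As a quick illustration of the helper above (values chosen arbitrarily), 95 records viewed from page 3 with 10 rows per page would give:

info = get_paging_info(95, 3, limit=10)
print(info)
# {'total_rows': 95, 'total_pages': 10, 'skip_rows': 20,
#  'current_page': 3, 'prev': 2, 'next': 4}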

Docker Commands

# Install MongoDB using the official Docker image

>docker images
>docker pull mongo:latest
>docker run -d -p 27017:27017 -v ~/mongodb-folder:/data/db --name mymongo mongo:latest
>docker ps
>docker exec -it mymongo bash
>mongo localhost:27017
>show dbs
>exit
>docker rm -f container_id
>docker stop container_id
>docker kill container_id
>docker rmi -f image_id
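Once the container is up, a minimal sketch (assuming pymongo is installed and the container is published on localhost:27017 as above; the database and collection names are hypothetical) to verify the connection from Python:

# pip install pymongo  (assumed; not part of the commands above)
from pymongo import MongoClient

# connects to the MongoDB container published on localhost:27017
client = MongoClient("mongodb://localhost:27017")
print(client.list_database_names())  # roughly the same output as `show dbs`

db = client["testdb"]
db["items"].insert_one({"name": "example"})
print(db["items"].find_one({"name": "example"}))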

# Dockerize a Python (FastAPI) project

Step 1: Create a Dockerfile

Write the code given below in the Dockerfile:

FROM python:3

# Create the working directory
WORKDIR /app

# Copy the source code (for a single file use COPY filename . or ADD filename filename)
COPY . .

RUN pip install -r requirements.txt

EXPOSE 8000

# Note: only the last CMD takes effect, so CMD ["source", ".env"] would be ignored;
# pass the .env variables at run time instead (e.g. docker run --env-file .env)
CMD ["uvicorn", "src.main:app", "--host", "0.0.0.0", "--port", "8000", "--reload"]

Step 2: Build and run the Docker image

>docker build -t dockermypythonpro .
>docker run -p 8000:8000 dockermypythonpro
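The Dockerfile's CMD assumes an app object importable as src.main:app; a minimal hypothetical sketch of that module:

# src/main.py  (hypothetical minimal app matching the CMD "src.main:app")
from fastapi import FastAPI

app = FastAPI()


@app.get("/")
async def read_root():
    return {"status": "ok"}

With the container running, http://localhost:8000/ should then return {"status": "ok"}.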

What is NumPy and why is it used?

""""
Numpy is Linear Algebra Library of python , the reason it is so important for data Science
with Python is that almost all of the libraries in the PyData Ecosystem rely on Numpy as on of their main
bulding blocks
Numpy is also increadibly fast as it has binding to C libraries
It is highly recommended you install Python using the Anaconda distribution to make sure all underlying dependencies
(Such as Linear Algebra libraries) all Sync up with the use of a conda install.
If you have Anaconda, install Numpy by going to your terminal or command prompt and typing
>conda install numpy
or
>pip install numpy
Numpy arrays essentially come in two flavours. Vectors & matrices.
Vector are strictly 1-d arrays and matrices are 2-d (but you should not a matrix can still have only one row one column)
"""


Code example:

import numpy as np
my_mat = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
print("example 1:", np.array(my_mat))
print("example 2:", np.arange(0, 10))
print("example 3:", np.arange(0, 10, 2))
print("example 4:", np.zeros(3))
print("example 5:", np.ones(4))
print("example 6:", np.ones((3, 4)))
print("example 7:", np.linspace(0, 5, 10))
print("example 8:", np.eye(4))
print("example 9:", np.random.rand(5, 5))
print("example 10:", np.random.randint(1, 30))
print("example 11:", np.random.randint(1, 30, 10))
arr = np.arange(25)
print("example 12:", arr.reshape(5, 5))
print("example 13:", arr.max())
print("example 14:", arr.min())
print("example 15:", arr.argmax())
print("example 16:", arr.argmin())
# Numpy Array Indexing
arr = np.arange(0, 20)
print("indexing example 1:", arr[8])
print("indexing example 2:", arr[1:5])
print("indexing example 3:", arr[:6])
print("indexing example 4:", arr[5:])
slice_of_arr = arr[0:6]
print("indexing example 5:", slice_of_arr)
slice_of_arr[:] = 99
print("indexing example 6:", slice_of_arr)
arr_copy = arr.copy()
print("indexing example 7:", arr_copy)
arr_copy[:] = 100
print("indexing example 8:", arr_copy)
# 2-d Array
arra_2d = np.array([[5, 10, 15], [20, 25, 30]])
print("2-d example 1:", arra_2d[1])
print("2-d example 2:", arra_2d[1][:2])
print("2-d example 3:", arra_2d[1][2])
print("2-d example 4:", arra_2d[1, 2])
print("2-d example 5:", arra_2d[:2, 2])
print("2-d example 6:", arra_2d[:2])
print("2-d example 7:", arra_2d > 6)
arr_2d = np.arange(50).reshape(5, 10)
print(arr_2d)
# Numpy Operations
"""
Array with Array
Array with Scalars
Universal Array Functions
"""
arr = np.arange(0, 11)
print('sum of array', arr+arr)
print('sub of array', arr-arr)
print('mul of array', arr*arr)
print('div of array', arr/arr)
print('sum by 100', arr+100)
print('sub by 100', arr-100)
print('mul by 100', arr*100)
print('div by 100', arr/100)
print('sqrt of array', np.sqrt(arr))
print('exp of array', np.exp(arr))
print('max of array', np.max(arr))
print('sin of array', np.sin(arr))
print('log of array', np.log(arr))
