In Python, using socket programming, how do I show data received at the client end?

December 11, 2019, at 04:50 AM

I am facing an issue showing data on the client side. The server validates incoming data against a MySQL database and, when the data is valid, sends the matching row from a second database back to the client. The server reports that the data is sent, but nothing shows up on the client end. server-side.py

import mysql.connector as mysql  # This module connects Python to a MySQL database
import socket
import sys
import json

mydb1 = mysql.connect(
    user='rajat',
    passwd='rajat',
    host='localhost',
    database='myforum'
)
def server_program():
    HOST = '192.168.0.115'  # the server's LAN IP address
    PORT = 8888
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # socket.socket: creates the socket.
    # socket.AF_INET: address family, Internet = IP addresses.
    # socket.SOCK_STREAM: two-way, connection-based byte stream.
    # Bind the socket to the host and port.
    try:
        s.bind((HOST, PORT))
    except socket.error as err:
        print(err)
        sys.exit()
    # listen(): sets up and starts the TCP listener.
    s.listen(10)
    print('Socket is now listening')
    while True:
        conn, addr = s.accept()  # first connection
        df1 = conn.recv(1024)
        conn, addr = s.accept()  # second accept(); conn is rebound to a new connection
        df7 = conn.recv(1024)
        df8 = json.loads(df7)  # parse the received bytes as JSON
        df2 = list(df8.values())
        # Validate the received data against the first MySQL database.
        mycursor1 = mydb1.cursor()
        mycursor1.execute("SELECT * FROM form_simpleform WHERE id=1")  # Select all fields from the table where id equals one.
        df3 = mycursor1.fetchone()
        if df2 == list(df3):
            print('Yes Data Exists')
            # The second database is used when the data is valid: the matching
            # row for that id is extracted and sent back to the client.
            mydb2 = mysql.connect(
                user='rajat',
                passwd='rajat',
                host='localhost',
                database='after_validation'
            )
            mycursor2 = mydb2.cursor()
            mycursor2.execute("SELECT * FROM android_display_data WHERE id=2")
            df4 = mycursor2.fetchone()
            conn.send(str(df4).encode('utf-8'))  # Send the row to the client as a string.
            conn.close()
            print("Data is sent to the client")
            break
        else:
            print('Invalid Data')
            break
    s.close()
if __name__ == '__main__':
    server_program()
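
Note that the loop above calls s.accept() twice per iteration, so conn is rebound to a second connection before conn.send() runs; the reply therefore goes to whichever client connects second, and the first client receives nothing. A minimal sketch of the same loop with a single accept() (keeping the original variable names, and assuming the client sends one JSON message) would be:

    while True:
        conn, addr = s.accept()  # one connection per iteration
        df7 = conn.recv(1024)    # a single recv() on that connection
        df8 = json.loads(df7)    # assumes the client sent a JSON object
        df2 = list(df8.values())
        mycursor1 = mydb1.cursor()
        mycursor1.execute("SELECT * FROM form_simpleform WHERE id=1")
        df3 = mycursor1.fetchone()
        if df2 == list(df3):
            mycursor2 = mydb2.cursor()  # mydb2 connected as in the original code
            mycursor2.execute("SELECT * FROM android_display_data WHERE id=2")
            df4 = mycursor2.fetchone()
            conn.send(json.dumps(df4, default=str).encode('utf-8'))  # reply on the same conn
        conn.close()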

Client-side.py

import socket
import sys
import json
def client_prog():
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect(('192.168.0.115', 8888))
    for args in sys.argv:  # sys.argv always contains at least the script name
        if args == '':
            args = 'no args'
        else:
            df1 = 'connected'
            s.sendall(df1.encode('utf-8'))  # send a greeting to the server
            print("done")
            df5 = s.recv(1024)  # wait for the server's reply
            print(df5)
if __name__ == '__main__':
    client_prog()
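
On the client side, recv() returns raw bytes, so b'' prints when the connection is closed without a reply. A minimal sketch of decoding a non-empty reply, assuming the server sends UTF-8 encoded JSON as in the sketch above:

df5 = s.recv(1024)
if df5:  # empty bytes (b'') mean the server closed the connection without replying
    row = json.loads(df5.decode('utf-8'))  # bytes -> str -> Python object
    print(row)
else:
    print('no data received')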

Output from server-side.py

C:\Users\Pallavai\DJANGO PROJECTS\Socket>python Server-Side.py
Socket is now listening
Yes Data Exists
Data is sent to the client

Output from client-side.py

C:\Users\Pallavai\DJANGO PROJECTS\Socket>python client-sidetxt.py
done
b''

How can I show the data on the client side, which currently prints as b''? I tried the pickle, json, and str methods, but none of them worked. Your help will be appreciated.
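
For reference, here is a self-contained round trip on one machine (no MySQL; the 127.0.0.1:9999 address and the sample row are placeholders) showing that a JSON-encoded row survives the socket and prints readably on the client:

import json
import socket
import threading

srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.bind(('127.0.0.1', 9999))  # placeholder host/port
srv.listen(1)

def handle_one():
    conn, addr = srv.accept()
    conn.recv(1024)  # read the client's greeting
    row = (2, 'hello', 42)  # stands in for mycursor2.fetchone()
    conn.send(json.dumps(row).encode('utf-8'))
    conn.close()

threading.Thread(target=handle_one).start()

c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
c.connect(('127.0.0.1', 9999))
c.sendall(b'connected')
print(json.loads(c.recv(1024).decode('utf-8')))  # prints [2, 'hello', 42]
c.close()
srv.close()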
