Python: Extracting Parameters from Request Headers (Burp Suite)

Published: January 5, 2024

Contents

Crawler-based extraction

Requirements

Code

Extracting parameters from a Burp Suite request body


Crawler-based extraction

Requirements

URL parameters come in three formats:

1. JSON format

2. Form format

3. XML format

The input comes from one of two sources:

1. Request headers copied from Burp Suite

2. Request headers captured by a crawler

Parameters can appear in one of two locations:

1. In the URL path

2. In the body of a POST request

Expected result:

Extract all parameters from the input and display them as key:value pairs.
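For reference, a request saved from Burp Suite is simply the raw HTTP request text. The file below is a hypothetical form-encoded POST (the host, path, and values are illustrative, reusing the parameter names from the example further down); it is the kind of file the Requester class later reads:

POST /aaaa.php HTTP/1.1
Host: 172.16.12.129:8080
User-Agent: Mozilla/5.0
Content-Type: application/x-www-form-urlencoded
Content-Length: 37

handle=aaa&insert=Generate+%3E%3AD%3C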

Code

get_params.py

#! /usr/bin/env python
# -*- coding: utf-8 -*-

import json


def get_params(url, data, json_b):
    """Return all request parameters as a dict of key: value pairs."""
    params = {}
    # Case 1: parameters are in the URL query string
    if '?' in url and '=' in url:
        data = url.split('?', 1)[1]
    elif data:
        # Case 2: the body is already a dict (parsed JSON)
        if json_b:
            return data
        # Case 3: the body is a JSON string
        try:
            return json.loads(data.replace('\'', '"'))
        except json.JSONDecodeError:
            pass
    else:
        return None
    # Case 4: form-encoded data (query string or POST body): key=value&key=value
    if not params:
        for part in data.split('&'):
            each = part.split('=', 1)   # split on the first '=' only, so values may contain '='
            if len(each) < 2:
                each.append('')
            params[each[0]] = each[1]
    return params

if __name__ == "__main__":
    #url = "http://172.16.12.129:8080/aaaa.php?aaa=3&bbb=4"
    url = "http://172.16.12.129:8080/aaaa.php"
    data = 'handle=<link rel=attachment href="file:///etc/passwd">&insert=Generate+%3E%3AD%3C'
    #data = {"handle": "aaa", "insert": "bbb"}
    print(get_params(url, data, isinstance(data, dict)))
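Running the script on the form-encoded sample data should print roughly the following (the values stay URL-encoded, because get_params does not call urllib.parse.unquote; that decoding is done later in the Requester class):

{'handle': '<link rel=attachment href="file:///etc/passwd">', 'insert': 'Generate+%3E%3AD%3C'}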

Extracting parameters from a Burp Suite request body

import re
import json
import urllib.parse

from get_params import get_params   # get_params() from get_params.py above


class Requester(object):
    protocol   = "http"
    host       = ""
    method     = ""
    action     = ""

    """
    path: path of the request file saved from Burp Suite
    uagent: optional User-Agent override
    ssl: whether to use http or https
    proxies: proxy settings
    """
    def __init__(self, path, uagent, ssl, proxies):
        # Per-instance dicts, so multiple requests do not share mutable class state
        self.headers = {}
        self.data    = {}
        self.params  = {}
        try:
            # Read the saved request file
            with open(path, 'r') as f:
                content = f.read().strip()
        except IOError:
            print("File not found")
            exit()

        try:
            content = content.split('\n')
            # Parse the method and the request URI from the first line
            regex = re.compile('(.*) (.*) HTTP')
            self.method, self.action = regex.findall(content[0])[0]   # request method, request path

            # Parse the headers
            for header in content[1:]:
                name, _, value = header.partition(': ')
                if not name or not value:
                    continue
                self.headers[name] = value
            self.host = self.headers['Host']

            # Override the User-Agent if one was supplied
            if uagent is not None:
                self.headers['User-Agent'] = uagent

            # Parse the body (distinguishing JSON, form, and XML formats)
            self.data_to_dict(content[-1])

            # Handle HTTPS requests
            if ssl:
                self.protocol = "https"

            self.proxies = proxies

            self.params = get_params(self.action, self.data, isinstance(self.data, dict))
        except Exception:
            print("Malformed request file or raw data!")

    def data_to_dict(self, data):
        if self.method == "POST":
            content_type = self.headers.get('Content-Type', '')

            # JSON body
            if "application/json" in content_type:
                self.data = json.loads(data)

            # XML body
            elif "application/xml" in content_type:
                self.data['__xml__'] = data

            # Form-encoded body
            else:
                regex = re.compile('(.*?)=(.*)')   # non-greedy key, so values may contain '='
                for arg in data.split("&"):
                    for name, value in regex.findall(arg):
                        name = urllib.parse.unquote(name)
                        value = urllib.parse.unquote(value)
                        self.data[name] = value
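A minimal usage sketch, assuming the Burp Suite request has been saved to a file named request.txt (the file name and the argument values below are illustrative, not part of the original code):

if __name__ == "__main__":
    # request.txt: raw request copied from Burp Suite (hypothetical example file)
    req = Requester("request.txt", uagent=None, ssl=False, proxies=None)
    print(req.method, req.protocol + "://" + req.host + req.action)
    print(req.params)   # all parameters as key: value pairs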
Source: https://blog.csdn.net/u012206617/article/details/135368540