Key-Node Identification Algorithm for Topological Data

Published: 2024-06-01

Table of Contents for the AI Application Development Column

This column shares content related to AI application development, including but not limited to: practical details of AI algorithm deployment, concepts and development techniques for AI back-end analysis services, concepts and development techniques for AI back-end application services, and implementation paths and development techniques for AI application front ends.
Intended for readers with a basic command of algorithms and Python.

  1. Overview of the AI application development workflow
  2. Remote development with Visual Studio Code and the Remote Development extension
  3. Common issues with open-source Git projects and mirror-based workarounds
  4. UDP message communication in Python
  5. Log generation and periodic cleanup in Python
  6. Common uses of the Linux terminal command Screen
  7. Storing data in Redis with Python
  8. Converting strings to dictionaries in Python
  9. Text vectorization and text similarity computation in Python
  10. Common MySQL data operations in Python
  11. A summary of anomalous-data handling examples in Python
  12. General-purpose data collection with Selenium and bs4 (code included)
  13. Knowledge graph techniques with Python
  14. A clear roadmap for learning Python
  15. Common Linux, Git, and Docker commands
  16. Migrating Python environments on Linux and Windows
  17. Scheduled (auto) start of Python services on Linux
  18. TTS development in Python on Windows
  19. Image segmentation with Python and OpenCV
  20. Translating Word documents in Python via an API
  21. YOLO-World: a "large model for object detection"
  22. Advanced web scraping: multi-threaded crawlers
  23. Simple PLC communication in Python over Modbus TCP
  24. ChatTTS: an open-source speech synthesis project
  25. SQLite performance considerations and usage (with a visual management tool)
  26. Key-node identification algorithm for topological data


Introduction

This post shares a program I wrote a while back for identifying key nodes in topology networks / system structures / graph data. It scores every node on five indicators (betweenness centrality, clustering coefficient, closeness centrality, plus two domain-specific impact indicators), fuses entropy-weight and AHP weights into a single composite weight vector, and ranks the nodes by their composite scores.

Code

import math
import warnings

import networkx as nx
import numpy as np
import pandas as pd

# Min-max normalization to [0, 1]
def normalization(data):
    _range = np.max(data) - np.min(data)
    return (data - np.min(data)) / _range
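
# --- Optional sanity check (not part of the original script) ---
# Min-max scaling maps the smallest value to 0 and the largest to 1, e.g.
# normalization([2, 4, 6]) -> array([0. , 0.5, 1. ]).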

# Clustering coefficient of every node
def julei(graph):
    score = nx.clustering(graph)
    output = []
    for node in score:
        output.append(score[node])
    print("Clustering coefficients:", output)
    return normalization(output)

# Betweenness centrality of every node
def topNBetweeness(graph):
    score = nx.betweenness_centrality(graph)
    output = []
    for node in score:
        output.append(score[node])
    print("Betweenness centrality:", output)
    return normalization(output)

# Closeness centrality of every node (based on average shortest-path distance)
def getClosenessCentrality(graph):
    closeness = []
    for i in graph.nodes():
        sumdij = 0
        for j in graph.nodes():
            if i != j:
                try:
                    sumdij += nx.dijkstra_path_length(graph, source=i, target=j)
                except nx.NetworkXNoPath:
                    # Unreachable node pairs are skipped
                    continue
        di = sumdij / nx.number_of_nodes(graph)
        if di == 0:
            closeness.append((i, di))
        else:
            closeness.append((i, 1 / di))
    output = []
    for node in closeness:
        output.append(node[1])
    print("Closeness centrality:", output)
    return normalization(output)
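
# --- Optional sanity check (not part of the original script) ---
# A minimal sketch showing the three centrality helpers on NetworkX's built-in
# karate-club graph, used here purely for illustration.
def demo_centrality_metrics():
    g = nx.karate_club_graph()
    print("clustering (first 5):", julei(g)[:5])
    print("betweenness (first 5):", topNBetweeness(g)[:5])
    print("closeness (first 5):", getClosenessCentrality(g)[:5])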

# Analytic hierarchy process (AHP)
class AHP:
    def __init__(self, criteria, factors):
        # Saaty's random consistency index (RI) for matrices of order 1..10
        self.RI = (0, 0, 0.58, 0.9, 1.12, 1.24, 1.32, 1.41, 1.45, 1.49)
        self.criteria = criteria  # criteria-level judgement matrix
        self.factors = factors    # factor-level judgement matrices
        self.num_criteria = criteria.shape[0]
        self.num_factors = factors[0].shape[0]

    def cal_weights(self, input_matrix):
        input_matrix = np.array(input_matrix)
        n, n1 = input_matrix.shape
        assert n == n1, 'not a square matrix'
        for i in range(n):
            for j in range(n):
                if np.abs(input_matrix[i, j] * input_matrix[j, i] - 1) > 1e-7:
                    raise ValueError('not a reciprocal pairwise-comparison matrix')

        eigenvalues, eigenvectors = np.linalg.eig(input_matrix)

        max_idx = np.argmax(eigenvalues)
        max_eigen = eigenvalues[max_idx].real
        eigen = eigenvectors[:, max_idx].real
        eigen = eigen / eigen.sum()

        if n > 9:
            CR = None
            warnings.warn('consistency cannot be judged for matrices larger than 9x9')
        elif n <= 2:
            # 1x1 and 2x2 reciprocal matrices are always consistent
            CR = 0.0
        else:
            CI = (max_eigen - n) / (n - 1)
            CR = CI / self.RI[n - 1]  # RI[n - 1] is the random index for an n x n matrix
        return max_eigen, CR, eigen

    def run(self):
        max_eigen, CR, criteria_eigen = self.cal_weights(self.criteria)
        print('Criteria level: max eigenvalue {:<5f}, CR={:<5f}, consistency check {}'.format(
            max_eigen, CR, 'passed' if CR < 0.1 else 'failed'))
        print('Criteria weights = {}\n'.format(criteria_eigen))

        max_eigen_list, CR_list, eigen_list = [], [], []
        k = 1
        for i in self.factors:
            max_eigen, CR, eigen = self.cal_weights(i)
            max_eigen_list.append(max_eigen)
            CR_list.append(CR)
            eigen_list.append(eigen)
            print('Criterion {} factor level: max eigenvalue {:<5f}, CR={:<5f}, consistency check {}'.format(
                k, max_eigen, CR, 'passed' if CR < 0.1 else 'failed'))
            print('Factor weights = {}\n'.format(eigen))

            k = k + 1

        return criteria_eigen, eigen_list

def ahp():
    # Criteria importance (pairwise-comparison) matrix
    criteria = np.array([[1, 1 / 2],
                         [2, 1]])
    # For each criterion, the pairwise comparison of its underlying factors
    b1 = np.array([[1, 1 / 3, 1 / 5], [3, 1, 1 / 3], [5, 3, 1]])
    b2 = np.array([[1, 1], [1, 1]])
    b = [b1, b2]
    a, c = AHP(criteria, b).run()
    # Combine criteria weights with factor weights into one weight per indicator
    res = a[0] * c[0]
    res = np.append(res, a[1] * c[1])
    return res
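
# --- Optional example (not part of the original script) ---
# A minimal sketch of calling cal_weights directly on a hypothetical 3x3
# pairwise-comparison matrix; the judgements below are illustrative only.
def demo_pairwise_weights():
    m = np.array([[1, 3, 5],
                  [1 / 3, 1, 3],
                  [1 / 5, 1 / 3, 1]])
    max_eigen, cr, w = AHP(m, [m]).cal_weights(m)
    print("lambda_max =", max_eigen, "CR =", cr, "weights =", w)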

# Entropy weight method for computing indicator weights
def cal_weight(x):
    # Each row of x is one indicator measured across all samples (nodes);
    # all values are assumed to be normalized to [0, 1] beforehand.
    x = np.array(x).T  # rows: samples, columns: indicators
    rows = x.shape[0]  # number of samples
    cols = x.shape[1]  # number of indicators
    k = 1.0 / math.log(rows)

    # Information-entropy contribution of every cell
    lnf = np.zeros((rows, cols))
    for i in range(rows):
        for j in range(cols):
            if x[i][j] == 0:
                lnfij = 0.0
            else:
                p = x[i][j] / x.sum(axis=0)[j]
                lnfij = math.log(p) * p * (-k)
            lnf[i][j] = lnfij
    E = pd.DataFrame(lnf)

    # Degree of divergence (redundancy) of each indicator
    d = 1 - E.sum(axis=0)

    # Weight of each indicator
    w = [None] * cols
    for j in range(cols):
        w[j] = d[j] / sum(d)
    print("Entropy weights:", w)
    return w
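
# --- Optional sanity check (not part of the original script) ---
# A minimal sketch: three indicators over four samples, values already in [0, 1]
# and entirely made up; the returned weights should be non-negative and sum to 1.
def demo_entropy_weights():
    toy = [[0.2, 0.4, 0.9, 1.0],   # indicator 1 across 4 samples
           [0.8, 0.1, 0.3, 0.5],   # indicator 2
           [0.6, 0.6, 0.7, 0.6]]   # indicator 3
    w = cal_weight(toy)
    print("sum of entropy weights:", sum(w))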

# Main program
def main():
    # Input topology (example: a system with 81 nodes and 100 edges)
    list_net_nodes = range(1, 82)
    list_net_edges = [(1, 2), (1, 3), (1, 5), (1, 77), (2, 4), (2, 6), (3, 9), (3, 24), (4, 9), (5, 10), (6, 10), (7, 8), (8, 9), (8, 10), (9, 11), (9, 12), (10, 12), (11, 13), (11, 14), (12, 13), (12, 23), (13, 23), (13, 80), (14, 16), (15, 16), (15, 21), (15, 24), (15, 77), (16, 17), (16, 19), (17, 18), (17, 22), (17, 81), (18, 21), (19, 20), (20, 23), (21, 22), (23, 79), (25, 26), (25, 77), (26, 77), (26, 27), (27, 34), (28, 29), (28, 34), (28, 35), (28, 78), (29, 30), (30, 78), (31, 32), (32, 33), (33, 34), (35, 36), (36, 37), (37, 38), (37, 39), (38, 44), (39, 40), (40, 41), (41, 42), (42, 80), (43, 44), (43, 81), (45, 56), (45, 79), (46, 81), (46, 48), (47, 48), (48, 49), (48, 52), (49, 50), (50, 51), (50, 54), (50, 55), (50, 60), (52, 53), (52, 56), (52, 58), (56, 57), (56, 66), (59, 60), (60, 64), (60, 68), (61, 64), (62, 68), (63, 64), (64, 69), (65, 66), (66, 67), (66, 71), (68, 72), (68, 73), (70, 71), (70, 77), (71, 72), (71, 75), (74, 75), (74, 78), (75, 76), (10, 11),]
    # Build the network
    net_grid = nx.Graph()
    # Add all nodes from the node list
    net_grid.add_nodes_from(list_net_nodes)
    # Add all edges from the edge list
    net_grid.add_edges_from(list_net_edges)

    # Betweenness centrality of every node
    top = topNBetweeness(net_grid)
    # Clustering coefficient of every node
    jl = julei(net_grid)
    # Closeness centrality of every node
    close = getClosenessCentrality(net_grid)

    # TODO: supply the power-flow impact and pressure-change impact indicators.
    # Each list must have one value per node (81 here), e.g. jl = [1, 1, 1] -> chaoliu1 = [1, 1, 1];
    # the script will not run until both lists are filled in.
    chaoliu1 = []
    chaoliu1 = normalization(chaoliu1)
    chaoliu2 = []
    chaoliu2 = normalization(chaoliu2)

    # Entropy weight method
    arr = [top, jl, close, chaoliu1, chaoliu2]
    sq = np.array(cal_weight(arr))
    # Analytic hierarchy process
    ahp_w = ahp()
    # Combined weights: geometric-mean style fusion of the two weight vectors
    qz = np.sqrt(sq * ahp_w) / np.sum(np.sqrt(sq * ahp_w))
    print("Combined weights:", qz)
    # Composite score of every node
    res = np.mat(qz) * np.mat(arr)
    r = {}
    i = 1
    for s in res.T:
        r[i] = float(s[0, 0])
        i = i + 1
    print("Node scores:", r)
    r = sorted(r.items(), key=lambda kv: (kv[1], kv[0]), reverse=True)
    print("Node ranking:", r)

if __name__ == '__main__':
    main()
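
The two domain indicators above (power-flow impact and pressure-change impact) have to be supplied before main() can run on the 81-node example. The sketch below, which is not part of the original program, exercises the same pipeline end to end on a hypothetical 5-node graph with made-up values for those two indicators, so the ranking logic can be verified before plugging in real data.

# End-to-end demo on a hypothetical 5-node graph (illustrative values only)
def demo_pipeline():
    g = nx.Graph()
    g.add_nodes_from(range(1, 6))
    g.add_edges_from([(1, 2), (1, 3), (2, 3), (3, 4), (4, 5), (2, 5)])
    top = topNBetweeness(g)
    jl = julei(g)
    close = getClosenessCentrality(g)
    chaoliu1 = normalization([0.2, 0.9, 0.4, 0.1, 0.6])  # made-up indicator values
    chaoliu2 = normalization([0.5, 0.3, 0.8, 0.2, 0.7])  # made-up indicator values
    arr = [top, jl, close, chaoliu1, chaoliu2]
    sq = np.array(cal_weight(arr))
    ahp_w = ahp()
    qz = np.sqrt(sq * ahp_w) / np.sum(np.sqrt(sq * ahp_w))
    scores = np.asarray(arr).T @ qz
    ranking = sorted(zip(range(1, 6), scores), key=lambda kv: kv[1], reverse=True)
    print("node ranking:", ranking)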
