# coding=utf-8
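"""Generate synthetic training data for Chinese Spelling Correction (CSC).

Each input sentence undergoes at most one randomly chosen edit: none
("same"), duplicating a word ("repeat"), deleting a character ("delete"),
or substituting a character from a phonetic ("sound") or glyph ("shape")
confusion set. Words are located with LTP word segmentation; person/place
names and whitelisted characters are never touched.

Output is JSON Lines, one object per sentence:
    {"source": <corrupted>, "target": <original>, "label": 0 or 1}

Illustrative invocation (script and file names are placeholders):
    python make_csc_data.py --input corpus.txt --output train.jsonl \
        --ltp_model /path/to/ltp_model
"""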

import argparse
import json
import random
from ltp import LTP
from tqdm import tqdm

def parse_args():
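    """Parse CLI options; the five corruption ratios must sum to 1 (asserted in do_mask)."""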
    parser = argparse.ArgumentParser()
    parser.add_argument("--input", type=str, required=True)
    parser.add_argument("--output", type=str, required=True)
    parser.add_argument("--ltp_model", type=str, required=True)
    parser.add_argument("--basic_hanzi", type=str, default="confusion/basic_hanzi_2500.txt")
    parser.add_argument("--sound_confusion", type=str, default="confusion/sound_confusion.txt")
    parser.add_argument("--shape_confusion", type=str, default="confusion/shape_confusion.txt")
    parser.add_argument("--same_ratio", type=float, default=0.1)
    parser.add_argument("--repeat_ratio", type=float, default=0.15)
    parser.add_argument("--delete_ratio", type=float, default=0.15)
    parser.add_argument("--sound_ratio", type=float, default=0.5)
    parser.add_argument("--shape_ratio", type=float, default=0.1)
    parser.add_argument("--whitelist", type=str, default="一二三四五六七八九十")
    parser.add_argument("--seed", type=int, default=42)
    args = parser.parse_args()
    return args

def isChinese(word):
    # True iff every character is a CJK Unified Ideograph (U+4E00-U+9FA5).
    return all(0x4E00 <= ord(ch) <= 0x9FA5 for ch in word)

def load_hanzi(path):
    # Load the set of common hanzi, one character per line; skip blank lines.
    hanzi = set()
    with open(path, mode="r", encoding="utf-8") as handle:
        for line in handle:
            line = line.strip()
            if not line:
                continue
            assert len(line) == 1
            hanzi.add(line)
    return hanzi

def load_confusion_set(path, hanzi):
    # Each line holds a key character followed by a string of confusable
    # characters; keep only confusables that are common hanzi and differ
    # from the key.
    confusion_set = {}
    with open(path, mode="r", encoding="utf-8") as handle:
        for line in handle:
            parts = line.strip().split()
            if len(parts) < 2:
                continue
            key, val = parts[0], []
            for c in parts[1]:
                if c in hanzi and c not in val and c != key:
                    val.append(c)
            if val:
                confusion_set[key] = val
    return confusion_set

def do_mask(sent, args):
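    """Apply at most one random corruption to `sent`; may return it unchanged."""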
    # Word segmentation and POS tagging
    cws, pos = args.ltp.pipeline(sent, tasks=["cws", "pos"], return_dict=False)
    
    # Pick a random word to corrupt; retry once if the first pick is not
    # Chinese, otherwise give up and return the sentence unchanged.
    n = len(cws)
    i = random.choice(range(n))
    word = cws[i]
    if not isChinese(word):
        i = random.choice(range(n))
        word = cws[i]
    if not isChinese(word):
        return sent

    # Draw a corruption type from the cumulative distribution over
    # same / repeat / delete / sound / shape; the five ratios must sum to 1.
    p = random.random()
    p1 = args.same_ratio
    p2 = p1 + args.repeat_ratio
    p3 = p2 + args.delete_ratio
    p4 = p3 + args.sound_ratio
    p5 = p4 + args.shape_ratio
    assert abs(p5 - 1) < 0.001
    if p < p1:
        # Same: keep the sentence unchanged (clean negative example)
        return sent
    if p < p2:
        # Repeat: duplicate the chosen word (redundancy error)
        cws[i] += word
        return ''.join(cws)
    if pos[i] in ['nh', 'ns']:
        # Never alter person names (nh) or place names (ns)
        return sent
    # Pick one character inside the word; whitelisted characters
    # (numerals by default) are never corrupted.
    chars = list(word)
    k = random.choice(range(len(word)))
    c = chars[k]
    if c in args.whitelist:
        return sent
    if p < p3:
        # Delete: drop the character (single-character words are kept intact)
        if len(word) < 2:
            return sent
        chars[k] = ''
        cws[i] = ''.join(chars)
        return ''.join(cws)
    if p < p4:
        # Sound: substitute a phonetically confusable character
        if c in args.sound_set:
            chars[k] = random.choice(args.sound_set[c])
    else:
        # Shape: substitute a visually confusable character
        if c in args.shape_set:
            chars[k] = random.choice(args.shape_set[c])
    cws[i] = ''.join(chars)
    return ''.join(cws)

if __name__ == "__main__":
    args = parse_args()
    random.seed(args.seed)
    # Set of common hanzi
    hanzi = load_hanzi(args.basic_hanzi)

    # Confusion sets
    args.sound_set = load_confusion_set(args.sound_confusion, hanzi)
    args.shape_set = load_confusion_set(args.shape_confusion, hanzi)
    args.hanzi = list(hanzi)  # not referenced elsewhere in this script

    # LTP Chinese segmentation and POS tagging model
    args.ltp = LTP(args.ltp_model)

    with open(args.output, mode="w", encoding="utf-8") as output, \
            open(args.input, mode="r", encoding="utf-8") as handle:
        for line in tqdm(handle):
            sent = line.strip()
            # Skip very short sentences
            if len(sent) < 4:
                continue
            source = do_mask(sent, args)
            # label == 1 iff the sentence was actually corrupted
            label = int(source != sent)
            output.write(
                json.dumps({"source": source, "target": sent, "label": label}, ensure_ascii=False)
            )
            output.write("\n")